-rw-r--r--Documentation/DocBook/gpu.tmpl597
-rw-r--r--Documentation/IPMI.txt7
-rw-r--r--Documentation/arm/keystone/Overview.txt18
-rw-r--r--Documentation/block/null_blk.txt3
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_dp.txt41
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi.txt12
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp.txt26
-rw-r--r--Documentation/devicetree/bindings/media/exynos5-gsc.txt4
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt6
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt4
-rw-r--r--Documentation/i2c/busses/i2c-i8011
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--MAINTAINERS58
-rw-r--r--Makefile2
-rw-r--r--arch/arc/configs/axs101_defconfig2
-rw-r--r--arch/arc/configs/axs103_defconfig2
-rw-r--r--arch/arc/configs/axs103_smp_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h3
-rw-r--r--arch/arc/include/asm/irqflags-compact.h2
-rw-r--r--arch/arc/kernel/ctx_sw.c2
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S3
-rw-r--r--arch/arc/kernel/process.c9
-rw-r--r--arch/arc/kernel/unwind.c37
-rw-r--r--arch/arc/mm/tlb.c4
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts1
-rw-r--r--arch/arm/boot/dts/animeo_ip.dts6
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi1
-rw-r--r--arch/arm/boot/dts/at91-foxg20.dts2
-rw-r--r--arch/arm/boot/dts/at91-kizbox.dts13
-rw-r--r--arch/arm/boot/dts/at91-kizbox2.dts6
-rw-r--r--arch/arm/boot/dts/at91-kizboxmini.dts4
-rw-r--r--arch/arm/boot/dts/at91-qil_a9260.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts115
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts12
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts12
-rw-r--r--arch/arm/boot/dts/at91rm9200ek.dts9
-rw-r--r--arch/arm/boot/dts/at91sam9261ek.dts19
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts13
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi13
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts13
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts11
-rw-r--r--arch/arm/boot/dts/at91sam9rlek.dts13
-rw-r--r--arch/arm/boot/dts/at91sam9x5cm.dtsi11
-rw-r--r--arch/arm/boot/dts/dra7.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts15
-rw-r--r--arch/arm/boot/dts/imx27.dtsi16
-rw-r--r--arch/arm/boot/dts/k2l-netcp.dtsi2
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-minnie.dts4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi10
-rw-r--r--arch/arm/boot/dts/sama5d35ek.dts2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/usb_a9260_common.dtsi2
-rw-r--r--arch/arm/boot/dts/usb_a9263.dts2
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi8
-rw-r--r--arch/arm/configs/at91_dt_defconfig1
-rw-r--r--arch/arm/configs/sama5_defconfig1
-rw-r--r--arch/arm/include/asm/irq.h5
-rw-r--r--arch/arm/include/asm/kvm_emulate.h12
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/bios32.c19
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/kvm/arm.c7
-rw-r--r--arch/arm/kvm/mmio.c5
-rw-r--r--arch/arm/kvm/mmu.c15
-rw-r--r--arch/arm/kvm/psci.c20
-rw-r--r--arch/arm/mach-dove/include/mach/entry-macro.S4
-rw-r--r--arch/arm/mach-imx/gpc.c1
-rw-r--r--arch/arm/mach-omap2/omap-smp.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c66
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c56
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c3
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c29
-rw-r--r--arch/arm/mach-omap2/pm34xx.c4
-rw-r--r--arch/arm/mach-orion5x/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-pxa/palm27x.c2
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7793.c2
-rw-r--r--arch/arm/mach-zx/Kconfig2
-rw-r--r--arch/arm/net/bpf_jit_32.c2
-rw-r--r--arch/arm64/Kconfig23
-rw-r--r--arch/arm64/crypto/aes-ce-cipher.c2
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/include/asm/compat.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h25
-rw-r--r--arch/arm64/include/asm/dma-mapping.h13
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h6
-rw-r--r--arch/arm64/include/asm/irq.h5
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h18
-rw-r--r--arch/arm64/include/asm/mmu_context.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h1
-rw-r--r--arch/arm64/kernel/cpu_errata.c9
-rw-r--r--arch/arm64/kernel/cpufeature.c37
-rw-r--r--arch/arm64/kernel/cpuinfo.c5
-rw-r--r--arch/arm64/kernel/efi.c45
-rw-r--r--arch/arm64/kernel/suspend.c10
-rw-r--r--arch/arm64/kvm/handle_exit.c2
-rw-r--r--arch/arm64/kvm/hyp.S14
-rw-r--r--arch/arm64/kvm/inject_fault.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c123
-rw-r--r--arch/arm64/kvm/sys_regs.h8
-rw-r--r--arch/arm64/kvm/sys_regs_generic_v8.c4
-rw-r--r--arch/arm64/mm/context.c38
-rw-r--r--arch/arm64/mm/dma-mapping.c35
-rw-r--r--arch/arm64/mm/fault.c28
-rw-r--r--arch/arm64/mm/mmu.c91
-rw-r--r--arch/arm64/net/bpf_jit_comp.c85
-rw-r--r--arch/m68k/coldfire/m54xx.c2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/setup_no.c9
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/m68k/mm/motorola.c2
-rw-r--r--arch/m68k/sun3/config.c4
-rw-r--r--arch/mips/ath79/setup.c7
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/include/asm/page.h3
-rw-r--r--arch/mips/kvm/emulate.c2
-rw-r--r--arch/mips/kvm/locore.S16
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/pci/pci-rt2880.c4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c4
-rw-r--r--arch/mips/sni/reset.c6
-rw-r--r--arch/mn10300/Kconfig4
-rw-r--r--arch/nios2/mm/cacheflush.c24
-rw-r--r--arch/parisc/Kconfig3
-rw-r--r--arch/parisc/include/asm/hugetlb.h85
-rw-r--r--arch/parisc/include/asm/page.h13
-rw-r--r--arch/parisc/include/asm/pgalloc.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h26
-rw-r--r--arch/parisc/include/asm/processor.h27
-rw-r--r--arch/parisc/include/uapi/asm/mman.h10
-rw-r--r--arch/parisc/kernel/asm-offsets.c8
-rw-r--r--arch/parisc/kernel/entry.S56
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/setup.c14
-rw-r--r--arch/parisc/kernel/syscall.S4
-rw-r--r--arch/parisc/kernel/traps.c35
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S9
-rw-r--r--arch/parisc/mm/Makefile1
-rw-r--r--arch/parisc/mm/hugetlbpage.c161
-rw-r--r--arch/parisc/mm/init.c40
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/process.c18
-rw-r--r--arch/powerpc/kernel/signal_32.c14
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/s390/include/asm/cio.h1
-rw-r--r--arch/s390/include/asm/elf.h13
-rw-r--r--arch/s390/include/asm/ipl.h3
-rw-r--r--arch/s390/include/asm/pci_dma.h4
-rw-r--r--arch/s390/include/asm/trace/diag.h6
-rw-r--r--arch/s390/include/uapi/asm/unistd.h19
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/diag.c4
-rw-r--r--arch/s390/kernel/head.S95
-rw-r--r--arch/s390/kernel/ipl.c65
-rw-r--r--arch/s390/kernel/process.c6
-rw-r--r--arch/s390/kernel/sclp.c2
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/trace.c6
-rw-r--r--arch/s390/kvm/interrupt.c7
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/kvm/sigp.c8
-rw-r--r--arch/s390/mm/init.c30
-rw-r--r--arch/s390/mm/mmap.c60
-rw-r--r--arch/s390/pci/pci_dma.c84
-rw-r--r--arch/x86/boot/boot.h1
-rw-r--r--arch/x86/boot/video-mode.c2
-rw-r--r--arch/x86/boot/video.c2
-rw-r--r--arch/x86/entry/entry_64.S19
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/page_types.h16
-rw-r--r--arch/x86/include/asm/pgtable_types.h14
-rw-r--r--arch/x86/include/asm/x86_init.h1
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c1
-rw-r--r--arch/x86/kernel/early-quirks.c1
-rw-r--r--arch/x86/kernel/fpu/signal.c11
-rw-r--r--arch/x86/kernel/fpu/xstate.c1
-rw-r--r--arch/x86/kernel/mcount_64.S6
-rw-r--r--arch/x86/kernel/pmem.c12
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/signal.c17
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kvm/vmx.c5
-rw-r--r--arch/x86/kvm/x86.c61
-rw-r--r--arch/x86/mm/mpx.c53
-rw-r--r--arch/x86/pci/bus_numa.c13
-rw-r--r--block/blk-core.c21
-rw-r--r--block/blk-merge.c35
-rw-r--r--block/blk-mq.c14
-rw-r--r--block/blk-settings.c36
-rw-r--r--block/blk-sysfs.c3
-rw-r--r--block/blk-timeout.c8
-rw-r--r--block/blk.h2
-rw-r--r--block/noop-iosched.c10
-rw-r--r--block/partition-generic.c2
-rw-r--r--block/partitions/mac.c10
-rw-r--r--crypto/algif_aead.c4
-rw-r--r--crypto/algif_skcipher.c6
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig4
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/nfit.c65
-rw-r--r--drivers/acpi/nfit.h3
-rw-r--r--drivers/acpi/pci_root.c7
-rw-r--r--drivers/acpi/sbshc.c48
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/domain_governor.c3
-rw-r--r--drivers/base/power/wakeirq.c6
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c6
-rw-r--r--drivers/block/null_blk.c301
-rw-r--r--drivers/block/rbd.c1
-rw-r--r--drivers/bus/omap-ocp2scp.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c82
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/fsl_ftm_timer.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/Kconfig.x861
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq.c21
-rw-r--r--drivers/cpufreq/intel_pstate.c322
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c2
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c3
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c2
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/dma/at_hdmac.c20
-rw-r--r--drivers/dma/at_hdmac_regs.h6
-rw-r--r--drivers/dma/at_xdmac.c20
-rw-r--r--drivers/dma/edma.c4
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/sh/usb-dmac.c11
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c7
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-palmas.c2
-rw-r--r--drivers/gpio/gpio-syscon.c6
-rw-r--r--drivers/gpio/gpio-tegra.c105
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c302
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c24
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h24
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c151
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h11
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c23
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h2
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h3
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c4
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h2
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c163
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c347
-rw-r--r--drivers/gpu/drm/drm_bridge.c69
-rw-r--r--drivers/gpu/drm/drm_crtc.c99
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c101
-rw-r--r--drivers/gpu/drm/drm_drv.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c62
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c51
-rw-r--r--drivers/gpu/drm/drm_fops.c142
-rw-r--r--drivers/gpu/drm/drm_gem.c35
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c10
-rw-r--r--drivers/gpu/drm/drm_irq.c54
-rw-r--r--drivers/gpu/drm/drm_modes.c78
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c89
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c13
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c177
-rw-r--r--drivers/gpu/drm/drm_rect.c7
-rw-r--r--drivers/gpu/drm/drm_sysfs.c54
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c104
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c161
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c181
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h81
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c153
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c142
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c214
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c51
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c239
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c7
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c6
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c18
-rw-r--r--drivers/gpu/drm/gma500/gem.c19
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c13
-rw-r--r--drivers/gpu/drm/gma500/gtt.c1
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c22
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c8
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c16
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/dvo.h3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c37
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c316
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c316
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h261
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c38
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c40
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h53
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c49
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c214
-rw-r--r--drivers/gpu/drm/i915/i915_params.c15
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2752
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h14
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c3
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c2
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c23
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c5
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c80
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c284
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c220
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1053
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1114
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c323
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c20
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h175
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c72
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c29
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c742
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c50
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c143
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h12
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h72
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c105
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c124
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c84
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c148
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h19
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c13
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c61
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c41
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c657
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c115
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c156
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c586
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c64
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c30
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c261
-rw-r--r--drivers/gpu/drm/imx/Kconfig9
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c2
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c21
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c63
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/Makefile3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c52
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c35
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c508
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c195
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h10
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c533
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c87
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c198
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c278
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h15
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c23
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c20
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c129
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c76
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h11
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h308
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h474
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c20
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c30
-rw-r--r--drivers/gpu/drm/radeon/cik.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5
-rw-r--r--drivers/gpu/drm/radeon/r100.c12
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c110
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c10
-rw-r--r--drivers/gpu/drm/radeon/rv730_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c5
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c3
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c49
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c7
-rw-r--r--drivers/gpu/drm/tegra/Kconfig12
-rw-r--r--drivers/gpu/drm/tegra/dc.c11
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h8
-rw-r--r--drivers/gpu/drm/tegra/dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/fb.c16
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c3
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c5
-rw-r--r--drivers/gpu/drm/vc4/Makefile11
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c517
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c110
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h318
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c867
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c210
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c149
-rw-r--r--drivers/gpu/drm/vc4/vc4_packet.h399
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c60
-rw-r--r--drivers/gpu/drm/vc4/vc4_qpu_defines.h264
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c634
-rw-r--r--drivers/gpu/drm/vc4/vc4_trace.h63
-rw-r--r--drivers/gpu/drm/vc4/vc4_trace_points.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c262
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c900
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c513
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c69
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-lg.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_wac.c5
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/scpi-hwmon.c21
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-imx.c1
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/iio/adc/ad7793.c2
-rw-r--r--drivers/iio/adc/vf610_adc.c22
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c1
-rw-r--r--drivers/iio/dac/ad5064.c91
-rw-r--r--drivers/iio/humidity/si7020.c8
-rw-r--r--drivers/iommu/s390-iommu.c23
-rw-r--r--drivers/irqchip/irq-gic-common.c13
-rw-r--r--drivers/irqchip/irq-gic.c38
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/q931.c6
-rw-r--r--drivers/lightnvm/core.c139
-rw-r--r--drivers/lightnvm/gennvm.c87
-rw-r--r--drivers/lightnvm/gennvm.h2
-rw-r--r--drivers/lightnvm/rrpc.c32
-rw-r--r--drivers/md/dm-crypt.c22
-rw-r--r--drivers/md/dm-mpath.c30
-rw-r--r--drivers/md/dm-thin.c6
-rw-r--r--drivers/md/dm.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c3
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c4
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c3
-rw-r--r--drivers/media/pci/cx88/cx88-video.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/mmc/card/block.c11
-rw-r--r--drivers/mmc/core/mmc.c93
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c1
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/c_can.c7
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/flexcan.c4
-rw-r--r--drivers/net/can/janz-ican3.c1
-rw-r--r--drivers/net/can/m_can/m_can.c7
-rw-r--r--drivers/net/can/pch_can.c3
-rw-r--r--drivers/net/can/rcar_can.c11
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/sun4i_can.c1
-rw-r--r--drivers/net/can/ti_hecc.c7
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c5
-rw-r--r--drivers/net/can/usb/usb_8dev.c4
-rw-r--r--drivers/net/can/xilinx_can.c9
-rw-r--r--drivers/net/dsa/mv88e6060.c114
-rw-r--r--drivers/net/dsa/mv88e6060.h111
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c5
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c31
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h1
-rw-r--r--drivers/net/ethernet/aurora/Kconfig20
-rw-r--r--drivers/net/ethernet/aurora/Makefile1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1552
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h316
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c44
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c7
-rw-r--r--drivers/net/ethernet/cadence/macb.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c23
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c28
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dlink/Kconfig5
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c55
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h15
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c19
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/icplus/Kconfig13
-rw-r--r--drivers/net/ethernet/icplus/Makefile5
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2300
-rw-r--r--drivers/net/ethernet/icplus/ipg.h748
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c76
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c14
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c28
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.c24
-rw-r--r--drivers/net/fjes/fjes_hw.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c14
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/marvell.c16
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/vitesse.c16
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c78
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c11
-rw-r--r--drivers/net/wan/hdlc_fr.c10
-rw-r--r--drivers/net/wan/x25_asy.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c49
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c88
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/nvme/host/Makefile3
-rw-r--r--drivers/nvme/host/lightnvm.c165
-rw-r--r--drivers/nvme/host/nvme.h14
-rw-r--r--drivers/nvme/host/pci.c51
-rw-r--r--drivers/pci/host/pcie-designware.c1
-rw-r--r--drivers/pci/host/pcie-hisi.c4
-rw-r--r--drivers/pci/pci-driver.c16
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c6
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c44
-rw-r--r--drivers/s390/cio/chsc.c37
-rw-r--r--drivers/s390/cio/chsc.h15
-rw-r--r--drivers/s390/cio/cio.c14
-rw-r--r--drivers/s390/cio/css.c5
-rw-r--r--drivers/s390/crypto/Makefile7
-rw-r--r--drivers/s390/crypto/ap_bus.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.c10
-rw-r--r--drivers/s390/crypto/zcrypt_api.h1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c3
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/hosts.c11
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/mpt3sas/Kconfig9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/mvsas/mv_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2
-rw-r--r--drivers/scsi/scsi_debug.c9
-rw-r--r--drivers/scsi/scsi_scan.c9
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/sd.c69
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/soc/mediatek/Kconfig1
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-mt65xx.c26
-rw-r--r--drivers/spi/spi-pl022.c28
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/staging/iio/Kconfig3
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h1
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c17
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c48
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c10
-rw-r--r--drivers/target/target_core_sbc.c17
-rw-r--r--drivers/target/target_core_stat.c2
-rw-r--r--drivers/target/target_core_tmr.c7
-rw-r--r--drivers/target/target_core_transport.c26
-rw-r--r--drivers/target/target_core_user.c4
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/imx_thermal.c56
-rw-r--r--drivers/thermal/of-thermal.c2
-rw-r--r--drivers/thermal/power_allocator.c24
-rw-r--r--drivers/thermal/rcar_thermal.c49
-rw-r--r--drivers/thermal/rockchip_thermal.c328
-rw-r--r--drivers/tty/n_tty.c2
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c1
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/etraxfs-uart.c2
-rw-r--r--drivers/tty/tty_audit.c2
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_ldisc.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c142
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/udc.c17
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c10
-rw-r--r--drivers/usb/class/usblp.c2
-rw-r--r--drivers/usb/core/Kconfig3
-rw-r--r--drivers/usb/dwc2/hcd.c9
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c24
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/host/xhci-hub.c15
-rw-r--r--drivers/usb/host/xhci-ring.c32
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/musb/musb_core.c12
-rw-r--r--drivers/usb/musb/musb_host.c22
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c7
-rw-r--r--drivers/usb/phy/phy-omap-otg.c2
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/serial/qcserial.c94
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h4
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/mtk_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c8
-rw-r--r--drivers/watchdog/tegra_wdt.c4
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/xen/events/events_base.c5
-rw-r--r--drivers/xen/evtchn.c123
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--fs/Kconfig6
-rw-r--r--fs/block_dev.c18
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/extent-tree.c123
-rw-r--r--fs/btrfs/file.c10
-rw-r--r--fs/btrfs/inode.c24
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/scrub.c62
-rw-r--r--fs/btrfs/tests/free-space-tests.c4
-rw-r--r--fs/btrfs/transaction.c32
-rw-r--r--fs/btrfs/transaction.h4
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/cachefiles/rdwr.c2
-rw-r--r--fs/configfs/dir.c110
-rw-r--r--fs/dax.c4
-rw-r--r--fs/direct-io.c10
-rw-r--r--fs/dlm/lowcomms.c4
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext4/super.c6
-rw-r--r--fs/fat/dir.c16
-rw-r--r--fs/hugetlbfs/inode.c65
-rw-r--r--fs/namei.c1
-rw-r--r--fs/ncpfs/ioctl.c2
-rw-r--r--fs/nfs/callback_xdr.c7
-rw-r--r--fs/nfs/inode.c11
-rw-r--r--fs/nfs/nfs42proc.c3
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4file.c59
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/nfs4xdr.c1
-rw-r--r--fs/nfs/pnfs.c56
-rw-r--r--fs/ocfs2/namei.c2
-rw-r--r--fs/overlayfs/copy_up.c23
-rw-r--r--fs/overlayfs/inode.c19
-rw-r--r--fs/overlayfs/overlayfs.h3
-rw-r--r--fs/splice.c8
-rw-r--r--fs/sysv/inode.c11
-rw-r--r--include/drm/drmP.h22
-rw-r--r--include/drm/drm_atomic.h9
-rw-r--r--include/drm/drm_atomic_helper.h8
-rw-r--r--include/drm/drm_crtc.h1222
-rw-r--r--include/drm/drm_crtc_helper.h167
-rw-r--r--include/drm/drm_dp_helper.h36
-rw-r--r--include/drm/drm_fb_cma_helper.h2
-rw-r--r--include/drm/drm_fb_helper.h101
-rw-r--r--include/drm/drm_gem.h106
-rw-r--r--include/drm/drm_mm.h26
-rw-r--r--include/drm/drm_modes.h347
-rw-r--r--include/drm/drm_modeset_helper_vtables.h890
-rw-r--r--include/drm/drm_modeset_lock.h4
-rw-r--r--include/drm/drm_plane_helper.h38
-rw-r--r--include/drm/drm_rect.h3
-rw-r--r--include/drm/i915_component.h69
-rw-r--r--include/drm/i915_pciids.h49
-rw-r--r--include/drm/ttm/ttm_bo_driver.h4
-rw-r--r--include/kvm/arm_vgic.h2
-rw-r--r--include/linux/acpi.h4
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/bpf.h5
-rw-r--r--include/linux/configfs.h10
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/dns_resolver.h2
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/kref.h33
-rw-r--r--include/linux/kvm_host.h11
-rw-r--r--include/linux/lightnvm.h176
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h24
-rw-r--r--include/linux/net.h13
-rw-r--r--include/linux/netdevice.h35
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter_ingress.h13
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/of_dma.h2
-rw-r--r--include/linux/pci.h9
-rw-r--r--include/linux/scpi_protocol.h2
-rw-r--r--include/linux/signal.h1
-rw-r--r--include/linux/slab.h45
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/thermal.h3
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/types.h2
-rw-r--r--include/net/af_unix.h1
-rw-r--r--include/net/ip6_fib.h3
-rw-r--r--include/net/ip6_route.h17
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_tunnels.h3
-rw-r--r--include/net/ipv6.h22
-rw-r--r--include/net/mac80211.h6
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/netfilter/nf_tables.h16
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/sctp/structs.h16
-rw-r--r--include/net/sock.h57
-rw-r--r--include/net/switchdev.h2
-rw-r--r--include/scsi/scsi_host.h3
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/uapi/drm/Kbuild2
-rw-r--r--include/uapi/drm/amdgpu_drm.h290
-rw-r--r--include/uapi/drm/armada_drm.h2
-rw-r--r--include/uapi/drm/drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h2
-rw-r--r--include/uapi/drm/drm_mode.h18
-rw-r--r--include/uapi/drm/drm_sarea.h2
-rw-r--r--include/uapi/drm/exynos_drm.h8
-rw-r--r--include/uapi/drm/i810_drm.h2
-rw-r--r--include/uapi/drm/i915_drm.h13
-rw-r--r--include/uapi/drm/mga_drm.h2
-rw-r--r--include/uapi/drm/msm_drm.h5
-rw-r--r--include/uapi/drm/nouveau_drm.h86
-rw-r--r--include/uapi/drm/omap_drm.h2
-rw-r--r--include/uapi/drm/qxl_drm.h77
-rw-r--r--include/uapi/drm/r128_drm.h2
-rw-r--r--include/uapi/drm/radeon_drm.h128
-rw-r--r--include/uapi/drm/savage_drm.h2
-rw-r--r--include/uapi/drm/tegra_drm.h2
-rw-r--r--include/uapi/drm/vc4_drm.h279
-rw-r--r--include/uapi/drm/via_drm.h5
-rw-r--r--include/uapi/drm/virtgpu_drm.h101
-rw-r--r--include/uapi/drm/vmwgfx_drm.h268
-rw-r--r--include/uapi/linux/agpgart.h1
-rw-r--r--include/uapi/linux/nfs.h11
-rw-r--r--include/uapi/linux/virtio_gpu.h2
-rw-r--r--include/video/imx-ipu-v3.h1
-rw-r--r--kernel/async.c1
-rw-r--r--kernel/bpf/arraymap.c10
-rw-r--r--kernel/bpf/hashtab.c34
-rw-r--r--kernel/bpf/inode.c6
-rw-r--r--kernel/bpf/syscall.c40
-rw-r--r--kernel/bpf/verifier.c3
-rw-r--r--kernel/livepatch/core.c6
-rw-r--r--kernel/panic.c5
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/sched/cputime.c3
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h3
-rw-r--r--kernel/sched/wait.c16
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/trace/ring_buffer.c17
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/memory.c8
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slab_common.c6
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c304
-rw-r--r--mm/vmalloc.c5
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/smp.c7
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/netclassid_cgroup.c26
-rw-r--r--net/core/rtnetlink.c274
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/core/sock.c12
-rw-r--r--net/core/stream.c6
-rw-r--r--net/dccp/ipv6.c37
-rw-r--r--net/dccp/proto.c3
-rw-r--r--net/decnet/af_decnet.c8
-rw-r--r--net/dns_resolver/dns_query.c2
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/ipv4/igmp.c5
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/ipmr.c23
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/raw.c8
-rw-r--r--net/ipv4/tcp.c28
-rw-r--r--net/ipv4/tcp_diag.c2
-rw-r--r--net/ipv4/tcp_input.c23
-rw-r--r--net/ipv4/tcp_ipv4.c17
-rw-r--r--net/ipv4/tcp_timer.c14
-rw-r--r--net/ipv4/udp.c1
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/af_inet6.c15
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/exthdrs.c3
-rw-r--r--net/ipv6/icmp.c14
-rw-r--r--net/ipv6/inet6_connection_sock.c21
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6mr.c19
-rw-r--r--net/ipv6/ipv6_sockglue.c33
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/ndisc.c10
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c5
-rw-r--r--net/ipv6/raw.c8
-rw-r--r--net/ipv6/reassembly.c10
-rw-r--r--net/ipv6/route.c24
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c51
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/mac80211/agg-tx.c3
-rw-r--r--net/mac80211/cfg.c8
-rw-r--r--net/mac80211/iface.c5
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh_pathtbl.c8
-rw-r--r--net/mac80211/scan.c9
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h17
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c14
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c64
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c18
-rw-r--r--net/netfilter/ipset/ip_set_core.c14
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h26
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nft_counter.c49
-rw-r--r--net/netfilter/nft_dynset.c5
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/openvswitch/dp_notify.c2
-rw-r--r--net/openvswitch/vport-geneve.c1
-rw-r--r--net/openvswitch/vport-gre.c1
-rw-r--r--net/openvswitch/vport-netdev.c8
-rw-r--r--net/openvswitch/vport.c8
-rw-r--r--net/openvswitch/vport.h8
-rw-r--r--net/packet/af_packet.c96
-rw-r--r--net/rds/connection.c6
-rw-r--r--net/rds/send.c4
-rw-r--r--net/rxrpc/ar-ack.c4
-rw-r--r--net/rxrpc/ar-output.c2
-rw-r--r--net/sched/sch_api.c27
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_mq.c4
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sctp/auth.c4
-rw-r--r--net/sctp/ipv6.c13
-rw-r--r--net/sctp/socket.c39
-rw-r--r--net/socket.c21
-rw-r--r--net/sunrpc/backchannel_rqst.c8
-rw-r--r--net/sunrpc/svc.c1
-rw-r--r--net/sunrpc/xprtsock.c14
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/socket.c10
-rw-r--r--net/tipc/udp_media.c7
-rw-r--r--net/unix/af_unix.c292
-rw-r--r--samples/bpf/Makefile7
-rwxr-xr-xscripts/kernel-doc2
-rw-r--r--security/keys/encrypted-keys/encrypted.c2
-rw-r--r--security/keys/trusted.c5
-rw-r--r--security/keys/user_defined.c5
-rw-r--r--security/selinux/ss/conditional.c4
-rw-r--r--sound/firewire/dice/dice.c4
-rw-r--r--sound/pci/hda/hda_intel.c7
-rw-r--r--sound/pci/hda/patch_conexant.c5
-rw-r--r--sound/pci/hda/patch_hdmi.c9
-rw-r--r--sound/pci/hda/patch_realtek.c23
-rw-r--r--sound/pci/hda/patch_sigmatel.c45
-rw-r--r--sound/soc/codecs/arizona.c16
-rw-r--r--sound/soc/codecs/es8328.c16
-rw-r--r--sound/soc/codecs/nau8825.c31
-rw-r--r--sound/soc/codecs/rl6231.c6
-rw-r--r--sound/soc/codecs/rt5645.c61
-rw-r--r--sound/soc/codecs/rt5670.h12
-rw-r--r--sound/soc/codecs/rt5677.c100
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/davinci/davinci-mcasp.c12
-rw-r--r--sound/soc/fsl/Kconfig2
-rw-r--r--sound/soc/fsl/fsl_sai.c3
-rw-r--r--sound/soc/intel/Kconfig2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c1
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c2
-rw-r--r--sound/soc/rockchip/rockchip_spdif.h6
-rw-r--r--sound/soc/sh/rcar/gen.c2
-rw-r--r--sound/soc/sh/rcar/src.c7
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-dapm.c7
-rw-r--r--sound/soc/soc-ops.c2
-rw-r--r--sound/soc/soc-topology.c3
-rw-r--r--sound/soc/sti/uniperif_player.c9
-rw-r--r--sound/soc/sti/uniperif_reader.c3
-rw-r--r--sound/soc/sunxi/sun4i-codec.c27
-rw-r--r--sound/usb/midi.c46
-rw-r--r--sound/usb/quirks-table.h11
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/Makefile11
-rw-r--r--tools/net/Makefile7
-rw-r--r--tools/perf/builtin-inject.c1
-rw-r--r--tools/perf/builtin-report.c6
-rw-r--r--tools/perf/ui/browsers/hists.c7
-rw-r--r--tools/perf/util/build-id.c1
-rw-r--r--tools/perf/util/dso.c17
-rw-r--r--tools/perf/util/dso.h1
-rw-r--r--tools/perf/util/machine.c1
-rw-r--r--tools/perf/util/probe-finder.c24
-rw-r--r--tools/perf/util/symbol.c34
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/power/x86/turbostat/turbostat.c8
-rw-r--r--tools/testing/nvdimm/test/nfit.c49
-rw-r--r--tools/testing/selftests/futex/README2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c11
-rw-r--r--tools/vm/page-types.c1
-rw-r--r--virt/kvm/arm/arch_timer.c28
-rw-r--r--virt/kvm/arm/vgic.c50
1192 files changed, 31182 insertions, 17772 deletions
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 201dcd3c2e9d..c66d6412f573 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -124,6 +124,43 @@
 <para>
 [Insert diagram of typical DRM stack here]
 </para>
+<sect1>
+<title>Style Guidelines</title>
+<para>
+For consistency this documentation uses American English. Abbreviations
+are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
+on. To aid in reading, documentations make full use of the markup
+characters kerneldoc provides: @parameter for function parameters, @member
+for structure members, &amp;structure to reference structures and
+function() for functions. These all get automatically hyperlinked if
+kerneldoc for the referenced objects exists. When referencing entries in
+function vtables please use -&gt;vfunc(). Note that kerneldoc does
+not support referencing struct members directly, so please add a reference
+to the vtable struct somewhere in the same paragraph or at least section.
+</para>
+<para>
+Except in special situations (to separate locked from unlocked variants)
+locking requirements for functions aren't documented in the kerneldoc.
+Instead locking should be check at runtime using e.g.
+<code>WARN_ON(!mutex_is_locked(...));</code>. Since it's much easier to
+ignore documentation than runtime noise this provides more value. And on
+top of that runtime checks do need to be updated when the locking rules
+change, increasing the chances that they're correct. Within the
+documentation the locking rules should be explained in the relevant
+structures: Either in the comment for the lock explaining what it
+protects, or data fields need a note about which lock protects them, or
+both.
+</para>
+<para>
+Functions which have a non-<code>void</code> return value should have a
+section called "Returns" explaining the expected return values in
+different cases and their meanings. Currently there's no consensus whether
+that section name should be all upper-case or not, and whether it should
+end in a colon or not. Go with the file-local style. Other common section
+names are "Notes" with information for dangerous or tricky corner cases,
+and "FIXME" where the interface could be cleaned up.
+</para>
+</sect1>
 </chapter>

 <!-- Internals -->
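As a concrete illustration of the conventions added above — a minimal sketch, not part of the patch; example_crtc_enable() and its body are hypothetical, only the @parameter/&structure/->vfunc() markup, the "Returns" section, and the WARN_ON(!mutex_is_locked(...)) runtime check follow the new text:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/**
 * example_crtc_enable - enable scanout on a CRTC (hypothetical example)
 * @crtc: the &drm_crtc to enable
 *
 * Enables @crtc with its currently configured mode. Drivers would reach
 * this through the ->enable() entry of their &drm_crtc_helper_funcs
 * vtable.
 *
 * Returns:
 * 0 on success, -EINVAL when @crtc is not enabled.
 */
static int example_crtc_enable(struct drm_crtc *crtc)
{
	/* Locking rule enforced at runtime rather than documented. */
	WARN_ON(!mutex_is_locked(&crtc->dev->mode_config.mutex));

	if (!crtc->enabled)
		return -EINVAL;

	/* ... program the hardware ... */
	return 0;
}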
@@ -615,18 +652,6 @@ char *date;</synopsis>
 <function>drm_gem_object_init</function>. Storage for private GEM
 objects must be managed by drivers.
 </para>
-<para>
-Drivers that do not need to extend GEM objects with private information
-can call the <function>drm_gem_object_alloc</function> function to
-allocate and initialize a struct <structname>drm_gem_object</structname>
-instance. The GEM core will call the optional driver
-<methodname>gem_init_object</methodname> operation after initializing
-the GEM object with <function>drm_gem_object_init</function>.
-<synopsis>int (*gem_init_object) (struct drm_gem_object *obj);</synopsis>
-</para>
-<para>
-No alloc-and-init function exists for private GEM objects.
-</para>
 </sect3>
 <sect3>
 <title>GEM Objects Lifetime</title>
@@ -635,10 +660,10 @@ char *date;</synopsis>
 acquired and release by <function>calling drm_gem_object_reference</function>
 and <function>drm_gem_object_unreference</function> respectively. The
 caller must hold the <structname>drm_device</structname>
-<structfield>struct_mutex</structfield> lock. As a convenience, GEM
-provides the <function>drm_gem_object_reference_unlocked</function> and
-<function>drm_gem_object_unreference_unlocked</function> functions that
-can be called without holding the lock.
+<structfield>struct_mutex</structfield> lock when calling
+<function>drm_gem_object_reference</function>. As a convenience, GEM
+provides <function>drm_gem_object_unreference_unlocked</function>
+functions that can be called without holding the lock.
 </para>
 <para>
 When the last reference to a GEM object is released the GEM core calls
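The reference counting rules in the hunk above, as a usage sketch — hypothetical; example_gem_ref_cycle() is made up, the two drm_gem_* calls are the helpers the text names:

#include <drm/drmP.h>
#include <drm/drm_gem.h>

static void example_gem_ref_cycle(struct drm_device *dev,
				  struct drm_gem_object *obj)
{
	/* Per the updated text, hold dev->struct_mutex around the plain
	 * reference call. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_reference(obj);
	mutex_unlock(&dev->struct_mutex);

	/* ... use the object ... */

	/* The _unlocked variant drops the reference without the caller
	 * holding struct_mutex. */
	drm_gem_object_unreference_unlocked(obj);
}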
@@ -649,15 +674,9 @@ char *date;</synopsis>
 </para>
 <para>
 <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
-Drivers are responsible for freeing all GEM object resources, including
-the resources created by the GEM core. If an mmap offset has been
-created for the object (in which case
-<structname>drm_gem_object</structname>::<structfield>map_list</structfield>::<structfield>map</structfield>
-is not NULL) it must be freed by a call to
-<function>drm_gem_free_mmap_offset</function>. The shmfs backing store
-must be released by calling <function>drm_gem_object_release</function>
-(that function can safely be called if no shmfs backing store has been
-created).
+Drivers are responsible for freeing all GEM object resources. This includes
+the resources created by the GEM core, which need to be released with
+<function>drm_gem_object_release</function>.
 </para>
 </sect3>
 <sect3>
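The freeing responsibility described in this hunk, sketched as a driver callback — hypothetical; struct example_gem_object and the function are made up, drm_gem_object_release() is the helper the new text names:

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

/* Hypothetical driver object embedding the core GEM object. */
struct example_gem_object {
	struct drm_gem_object base;
	/* driver-private state would live here */
};

/* Wired up as the driver's gem_free_object callback. */
static void example_gem_free_object(struct drm_gem_object *obj)
{
	struct example_gem_object *ebo =
		container_of(obj, struct example_gem_object, base);

	/* Releases the core-managed resources (mmap offset, shmfs backing
	 * store); per the removed text it is safe to call even when no
	 * backing store was created. */
	drm_gem_object_release(obj);

	kfree(ebo);
}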
@@ -740,17 +759,10 @@ char *date;</synopsis>
         DRM identifies the GEM object to be mapped by a fake offset passed
         through the mmap offset argument. Prior to being mapped, a GEM object
         must thus be associated with a fake offset. To do so, drivers must call
-        <function>drm_gem_create_mmap_offset</function> on the object. The
-        function allocates a fake offset range from a pool and stores the
-        offset divided by PAGE_SIZE in
-        <literal>obj-&gt;map_list.hash.key</literal>. Care must be taken not to
-        call <function>drm_gem_create_mmap_offset</function> if a fake offset
-        has already been allocated for the object. This can be tested by
-        <literal>obj-&gt;map_list.map</literal> being non-NULL.
+        <function>drm_gem_create_mmap_offset</function> on the object.
       </para>
       <para>
         Once allocated, the fake offset value
-        (<literal>obj-&gt;map_list.hash.key &lt;&lt; PAGE_SHIFT</literal>)
         must be passed to the application in a driver-specific way and can then
         be used as the mmap offset argument.
       </para>
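      <para>
        As a sketch, a driver-specific ioctl handler that hands the fake
        offset back to userspace might look as follows (example_* is
        hypothetical; the lookup, offset and reference calls are the 4.4-era
        API, where the offset is read from the object's vma_node):
      </para>
      <programlisting><![CDATA[
static int example_gem_map_offset(struct drm_file *file,
				  struct drm_device *dev,
				  uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_create_mmap_offset(obj);	/* idempotent */
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
]]></programlisting>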
@@ -836,10 +848,11 @@ char *date;</synopsis>
         abstracted from the client in libdrm.
       </para>
     </sect3>
-    <sect3>
-      <title>GEM Function Reference</title>
+  </sect2>
+  <sect2>
+    <title>GEM Function Reference</title>
 !Edrivers/gpu/drm/drm_gem.c
-    </sect3>
+!Iinclude/drm/drm_gem.h
   </sect2>
   <sect2>
     <title>VMA Offset Manager</title>
@@ -970,6 +983,7 @@ int max_width, max_height;</synopsis>
   <sect2>
     <title>Atomic Mode Setting Function Reference</title>
 !Edrivers/gpu/drm/drm_atomic.c
+!Idrivers/gpu/drm/drm_atomic.c
   </sect2>
   <sect2>
     <title>Frame Buffer Creation</title>
@@ -1197,137 +1211,6 @@ int max_width, max_height;</synopsis>
         pointer to CRTC functions.
       </para>
     </sect3>
1200 <sect3 id="drm-kms-crtcops">
1201 <title>CRTC Operations</title>
1202 <sect4>
1203 <title>Set Configuration</title>
1204 <synopsis>int (*set_config)(struct drm_mode_set *set);</synopsis>
1205 <para>
1206 Apply a new CRTC configuration to the device. The configuration
1207 specifies a CRTC, a frame buffer to scan out from, a (x,y) position in
1208 the frame buffer, a display mode and an array of connectors to drive
1209 with the CRTC if possible.
1210 </para>
1211 <para>
1212 If the frame buffer specified in the configuration is NULL, the driver
1213 must detach all encoders connected to the CRTC and all connectors
1214 attached to those encoders and disable them.
1215 </para>
1216 <para>
1217 This operation is called with the mode config lock held.
1218 </para>
1219 <note><para>
1220 Note that the drm core has no notion of restoring the mode setting
1221 state after resume, since all resume handling is in the full
1222 responsibility of the driver. The common mode setting helper library
1223 though provides a helper which can be used for this:
1224 <function>drm_helper_resume_force_mode</function>.
1225 </para></note>
1226 </sect4>
1227 <sect4>
1228 <title>Page Flipping</title>
1229 <synopsis>int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1230 struct drm_pending_vblank_event *event);</synopsis>
1231 <para>
1232 Schedule a page flip to the given frame buffer for the CRTC. This
1233 operation is called with the mode config mutex held.
1234 </para>
1235 <para>
1236 Page flipping is a synchronization mechanism that replaces the frame
1237 buffer being scanned out by the CRTC with a new frame buffer during
1238 vertical blanking, avoiding tearing. When an application requests a page
1239 flip the DRM core verifies that the new frame buffer is large enough to
1240 be scanned out by the CRTC in the currently configured mode and then
1241 calls the CRTC <methodname>page_flip</methodname> operation with a
1242 pointer to the new frame buffer.
1243 </para>
1244 <para>
1245 The <methodname>page_flip</methodname> operation schedules a page flip.
1246 Once any pending rendering targeting the new frame buffer has
1247 completed, the CRTC will be reprogrammed to display that frame buffer
1248 after the next vertical refresh. The operation must return immediately
1249 without waiting for rendering or page flip to complete and must block
1250 any new rendering to the frame buffer until the page flip completes.
1251 </para>
1252 <para>
1253 If a page flip can be successfully scheduled the driver must set the
1254 <code>drm_crtc-&gt;fb</code> field to the new framebuffer pointed to
1255 by <code>fb</code>. This is important so that the reference counting
1256 on framebuffers stays balanced.
1257 </para>
1258 <para>
1259 If a page flip is already pending, the
1260 <methodname>page_flip</methodname> operation must return
1261 -<errorname>EBUSY</errorname>.
1262 </para>
1263 <para>
1264 To synchronize page flip to vertical blanking the driver will likely
1265 need to enable vertical blanking interrupts. It should call
1266 <function>drm_vblank_get</function> for that purpose, and call
1267 <function>drm_vblank_put</function> after the page flip completes.
1268 </para>
1269 <para>
1270 If the application has requested to be notified when page flip completes
1271 the <methodname>page_flip</methodname> operation will be called with a
1272 non-NULL <parameter>event</parameter> argument pointing to a
1273 <structname>drm_pending_vblank_event</structname> instance. Upon page
1274 flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
1275 to fill in the event and send to wake up any waiting processes.
1276 This can be performed with
1277 <programlisting><![CDATA[
1278 spin_lock_irqsave(&dev->event_lock, flags);
1279 ...
1280 drm_send_vblank_event(dev, pipe, event);
1281 spin_unlock_irqrestore(&dev->event_lock, flags);
1282 ]]></programlisting>
1283 </para>
1284 <note><para>
1285 FIXME: Could drivers that don't need to wait for rendering to complete
1286 just add the event to <literal>dev-&gt;vblank_event_list</literal> and
1287 let the DRM core handle everything, as for "normal" vertical blanking
1288 events?
1289 </para></note>
1290 <para>
1291 While waiting for the page flip to complete, the
1292 <literal>event-&gt;base.link</literal> list head can be used freely by
1293 the driver to store the pending event in a driver-specific list.
1294 </para>
1295 <para>
1296 If the file handle is closed before the event is signaled, drivers must
1297 take care to destroy the event in their
1298 <methodname>preclose</methodname> operation (and, if needed, call
1299 <function>drm_vblank_put</function>).
1300 </para>
1301 </sect4>
1302 <sect4>
1303 <title>Miscellaneous</title>
1304 <itemizedlist>
1305 <listitem>
1306 <synopsis>void (*set_property)(struct drm_crtc *crtc,
1307 struct drm_property *property, uint64_t value);</synopsis>
1308 <para>
1309 Set the value of the given CRTC property to
1310 <parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
1311 for more information about properties.
1312 </para>
1313 </listitem>
1314 <listitem>
1315 <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1316 uint32_t start, uint32_t size);</synopsis>
1317 <para>
1318 Apply a gamma table to the device. The operation is optional.
1319 </para>
1320 </listitem>
1321 <listitem>
1322 <synopsis>void (*destroy)(struct drm_crtc *crtc);</synopsis>
1323 <para>
1324 Destroy the CRTC when not needed anymore. See
1325 <xref linkend="drm-kms-init"/>.
1326 </para>
1327 </listitem>
1328 </itemizedlist>
1329 </sect4>
1330 </sect3>
   </sect2>
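      <para>
        For reference, a skeletal <methodname>page_flip</methodname>
        implementation matching the contract described in the removed section
        above (a sketch only: the example_* types and hooks are hypothetical;
        note that in this kernel generation the hook also takes a flags
        argument and the framebuffer pointer lives in crtc-&gt;primary-&gt;fb):
      </para>
      <programlisting><![CDATA[
static int example_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     struct drm_pending_vblank_event *event,
			     uint32_t flags)
{
	struct example_crtc *ecrtc = to_example_crtc(crtc);

	if (ecrtc->pending_event)
		return -EBUSY;			/* a flip is already queued */

	drm_vblank_get(crtc->dev, ecrtc->pipe);	/* put again on completion */
	ecrtc->pending_event = event;
	example_hw_program_flip(ecrtc, fb);	/* hypothetical hardware hook */
	crtc->primary->fb = fb;			/* keeps fb refcounting balanced */

	return 0;
}

/* Called from the vblank interrupt once the flip has taken effect. */
static void example_complete_flip(struct example_crtc *ecrtc)
{
	struct drm_device *dev = ecrtc->base.dev;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (ecrtc->pending_event)
		drm_send_vblank_event(dev, ecrtc->pipe, ecrtc->pending_event);
	ecrtc->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, irqflags);

	drm_vblank_put(dev, ecrtc->pipe);
}
]]></programlisting>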
   <sect2>
     <title>Planes (struct <structname>drm_plane</structname>)</title>
@@ -1344,7 +1227,7 @@ int max_width, max_height;</synopsis>
       <listitem>
         DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
         planes are the planes operated upon by CRTC modesetting and flipping
-        operations described in <xref linkend="drm-kms-crtcops"/>.
+        operations described in the page_flip hook in <structname>drm_crtc_funcs</structname>.
       </listitem>
       <listitem>
         DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor
@@ -1381,52 +1264,6 @@ int max_width, max_height;</synopsis>
         primary plane with standard capabilities.
       </para>
     </sect3>
1384 <sect3>
1385 <title>Plane Operations</title>
1386 <itemizedlist>
1387 <listitem>
1388 <synopsis>int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
1389 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
1390 unsigned int crtc_w, unsigned int crtc_h,
1391 uint32_t src_x, uint32_t src_y,
1392 uint32_t src_w, uint32_t src_h);</synopsis>
1393 <para>
1394 Enable and configure the plane to use the given CRTC and frame buffer.
1395 </para>
1396 <para>
1397 The source rectangle in frame buffer memory coordinates is given by
1398 the <parameter>src_x</parameter>, <parameter>src_y</parameter>,
1399 <parameter>src_w</parameter> and <parameter>src_h</parameter>
1400 parameters (as 16.16 fixed point values). Devices that don't support
1401 subpixel plane coordinates can ignore the fractional part.
1402 </para>
1403 <para>
1404 The destination rectangle in CRTC coordinates is given by the
1405 <parameter>crtc_x</parameter>, <parameter>crtc_y</parameter>,
1406 <parameter>crtc_w</parameter> and <parameter>crtc_h</parameter>
1407 parameters (as integer values). Devices scale the source rectangle to
1408 the destination rectangle. If scaling is not supported, and the source
1409 rectangle size doesn't match the destination rectangle size, the
1410 driver must return a -<errorname>EINVAL</errorname> error.
1411 </para>
1412 </listitem>
1413 <listitem>
1414 <synopsis>int (*disable_plane)(struct drm_plane *plane);</synopsis>
1415 <para>
1416 Disable the plane. The DRM core calls this method in response to a
1417 DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
1418 Disabled planes must not be processed by the CRTC.
1419 </para>
1420 </listitem>
1421 <listitem>
1422 <synopsis>void (*destroy)(struct drm_plane *plane);</synopsis>
1423 <para>
1424 Destroy the plane when not needed anymore. See
1425 <xref linkend="drm-kms-init"/>.
1426 </para>
1427 </listitem>
1428 </itemizedlist>
1429 </sect3>
   </sect2>
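      <para>
        A sketch of an <methodname>update_plane</methodname> implementation
        for hardware without plane scaling, illustrating the 16.16 fixed-point
        source coordinates described in the removed text above (the example_*
        names are hypothetical):
      </para>
      <programlisting><![CDATA[
static int example_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				int crtc_x, int crtc_y,
				unsigned int crtc_w, unsigned int crtc_h,
				uint32_t src_x, uint32_t src_y,
				uint32_t src_w, uint32_t src_h)
{
	/* src_* are 16.16 fixed point; without scaling, the integer part of
	 * the source size must match the destination rectangle exactly. */
	if (crtc_w != (src_w >> 16) || crtc_h != (src_h >> 16))
		return -EINVAL;

	return example_hw_show_plane(plane, crtc, fb, crtc_x, crtc_y,
				     src_x >> 16, src_y >> 16);
}
]]></programlisting>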
   <sect2>
     <title>Encoders (struct <structname>drm_encoder</structname>)</title>
@@ -1483,27 +1320,6 @@ int max_width, max_height;</synopsis>
         encoders they want to use to a CRTC.
       </para>
     </sect3>
1486 <sect3>
1487 <title>Encoder Operations</title>
1488 <itemizedlist>
1489 <listitem>
1490 <synopsis>void (*destroy)(struct drm_encoder *encoder);</synopsis>
1491 <para>
1492 Called to destroy the encoder when not needed anymore. See
1493 <xref linkend="drm-kms-init"/>.
1494 </para>
1495 </listitem>
1496 <listitem>
1497 <synopsis>void (*set_property)(struct drm_plane *plane,
1498 struct drm_property *property, uint64_t value);</synopsis>
1499 <para>
1500 Set the value of the given plane property to
1501 <parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
1502 for more information about properties.
1503 </para>
1504 </listitem>
1505 </itemizedlist>
1506 </sect3>
   </sect2>
   <sect2>
     <title>Connectors (struct <structname>drm_connector</structname>)</title>
@@ -1707,27 +1523,6 @@ int max_width, max_height;</synopsis>
             connector_status_unknown.
           </para>
         </sect4>
1710 <sect4>
1711 <title>Miscellaneous</title>
1712 <itemizedlist>
1713 <listitem>
1714 <synopsis>void (*set_property)(struct drm_connector *connector,
1715 struct drm_property *property, uint64_t value);</synopsis>
1716 <para>
1717 Set the value of the given connector property to
1718 <parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
1719 for more information about properties.
1720 </para>
1721 </listitem>
1722 <listitem>
1723 <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis>
1724 <para>
1725 Destroy the connector when not needed anymore. See
1726 <xref linkend="drm-kms-init"/>.
1727 </para>
1728 </listitem>
1729 </itemizedlist>
1730 </sect4>
     </sect3>
   </sect2>
   <sect2>
@@ -1854,83 +1649,7 @@ void intel_crt_init(struct drm_device *dev)
     entities.
   </para>
   <sect2>
-    <title>Helper Functions</title>
+    <title>Legacy CRTC Helper Operations</title>
1858 <itemizedlist>
1859 <listitem>
1860 <synopsis>int drm_crtc_helper_set_config(struct drm_mode_set *set);</synopsis>
1861 <para>
1862 The <function>drm_crtc_helper_set_config</function> helper function
1863 is a CRTC <methodname>set_config</methodname> implementation. It
1864 first tries to locate the best encoder for each connector by calling
1865 the connector <methodname>best_encoder</methodname> helper
1866 operation.
1867 </para>
1868 <para>
1869 After locating the appropriate encoders, the helper function will
1870 call the <methodname>mode_fixup</methodname> encoder and CRTC helper
1871 operations to adjust the requested mode, or reject it completely in
1872 which case an error will be returned to the application. If the new
1873 configuration after mode adjustment is identical to the current
1874 configuration the helper function will return without performing any
1875 other operation.
1876 </para>
1877 <para>
1878 If the adjusted mode is identical to the current mode but changes to
1879 the frame buffer need to be applied, the
1880 <function>drm_crtc_helper_set_config</function> function will call
1881 the CRTC <methodname>mode_set_base</methodname> helper operation. If
1882 the adjusted mode differs from the current mode, or if the
1883 <methodname>mode_set_base</methodname> helper operation is not
1884 provided, the helper function performs a full mode set sequence by
1885 calling the <methodname>prepare</methodname>,
1886 <methodname>mode_set</methodname> and
1887 <methodname>commit</methodname> CRTC and encoder helper operations,
1888 in that order.
1889 </para>
1890 </listitem>
1891 <listitem>
1892 <synopsis>void drm_helper_connector_dpms(struct drm_connector *connector, int mode);</synopsis>
1893 <para>
1894 The <function>drm_helper_connector_dpms</function> helper function
1895 is a connector <methodname>dpms</methodname> implementation that
1896 tracks power state of connectors. To use the function, drivers must
1897 provide <methodname>dpms</methodname> helper operations for CRTCs
1898 and encoders to apply the DPMS state to the device.
1899 </para>
1900 <para>
1901 The mid-layer doesn't track the power state of CRTCs and encoders.
1902 The <methodname>dpms</methodname> helper operations can thus be
1903 called with a mode identical to the currently active mode.
1904 </para>
1905 </listitem>
1906 <listitem>
1907 <synopsis>int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
1908 uint32_t maxX, uint32_t maxY);</synopsis>
1909 <para>
1910 The <function>drm_helper_probe_single_connector_modes</function> helper
1911 function is a connector <methodname>fill_modes</methodname>
1912 implementation that updates the connection status for the connector
1913 and then retrieves a list of modes by calling the connector
1914 <methodname>get_modes</methodname> helper operation.
1915 </para>
1916 <para>
1917 If the helper operation returns no mode, and if the connector status
1918 is connector_status_connected, standard VESA DMT modes up to
1919 1024x768 are automatically added to the modes list by a call to
1920 <function>drm_add_modes_noedid</function>.
1921 </para>
1922 <para>
1923 The function then filters out modes larger than
1924 <parameter>max_width</parameter> and <parameter>max_height</parameter>
1925 if specified. It finally calls the optional connector
1926 <methodname>mode_valid</methodname> helper operation for each mode in
1927 the probed list to check whether the mode is valid for the connector.
1928 </para>
1929 </listitem>
1930 </itemizedlist>
1931 </sect2>
1932 <sect2>
1933 <title>CRTC Helper Operations</title>
     <itemizedlist>
       <listitem id="drm-helper-crtc-mode-fixup">
         <synopsis>bool (*mode_fixup)(struct drm_crtc *crtc,
@@ -2076,198 +1795,6 @@ void intel_crt_init(struct drm_device *dev)
         <function>drm_add_edid_modes</function> manually in that case.
       </para>
       <para>
2079 When adding modes manually the driver creates each mode with a call to
2080 <function>drm_mode_create</function> and must fill the following fields.
2081 <itemizedlist>
2082 <listitem>
2083 <synopsis>__u32 type;</synopsis>
2084 <para>
2085 Mode type bitmask, a combination of
2086 <variablelist>
2087 <varlistentry>
2088 <term>DRM_MODE_TYPE_BUILTIN</term>
2089 <listitem><para>not used?</para></listitem>
2090 </varlistentry>
2091 <varlistentry>
2092 <term>DRM_MODE_TYPE_CLOCK_C</term>
2093 <listitem><para>not used?</para></listitem>
2094 </varlistentry>
2095 <varlistentry>
2096 <term>DRM_MODE_TYPE_CRTC_C</term>
2097 <listitem><para>not used?</para></listitem>
2098 </varlistentry>
2099 <varlistentry>
2100 <term>
2101 DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector
2102 </term>
2103 <listitem>
2104 <para>not used?</para>
2105 </listitem>
2106 </varlistentry>
2107 <varlistentry>
2108 <term>DRM_MODE_TYPE_DEFAULT</term>
2109 <listitem><para>not used?</para></listitem>
2110 </varlistentry>
2111 <varlistentry>
2112 <term>DRM_MODE_TYPE_USERDEF</term>
2113 <listitem><para>not used?</para></listitem>
2114 </varlistentry>
2115 <varlistentry>
2116 <term>DRM_MODE_TYPE_DRIVER</term>
2117 <listitem>
2118 <para>
2119 The mode has been created by the driver (as opposed to
2120 to user-created modes).
2121 </para>
2122 </listitem>
2123 </varlistentry>
2124 </variablelist>
2125 Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
2126 create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
2127 mode.
2128 </para>
2129 </listitem>
2130 <listitem>
2131 <synopsis>__u32 clock;</synopsis>
2132 <para>Pixel clock frequency in kHz unit</para>
2133 </listitem>
2134 <listitem>
2135 <synopsis>__u16 hdisplay, hsync_start, hsync_end, htotal;
2136 __u16 vdisplay, vsync_start, vsync_end, vtotal;</synopsis>
2137 <para>Horizontal and vertical timing information</para>
2138 <screen><![CDATA[
2139 Active Front Sync Back
2140 Region Porch Porch
2141 <-----------------------><----------------><-------------><-------------->
2142
2143 //////////////////////|
2144 ////////////////////// |
2145 ////////////////////// |.................. ................
2146 _______________
2147
2148 <----- [hv]display ----->
2149 <------------- [hv]sync_start ------------>
2150 <--------------------- [hv]sync_end --------------------->
2151 <-------------------------------- [hv]total ----------------------------->
2152]]></screen>
2153 </listitem>
2154 <listitem>
2155 <synopsis>__u16 hskew;
2156 __u16 vscan;</synopsis>
2157 <para>Unknown</para>
2158 </listitem>
2159 <listitem>
2160 <synopsis>__u32 flags;</synopsis>
2161 <para>
2162 Mode flags, a combination of
2163 <variablelist>
2164 <varlistentry>
2165 <term>DRM_MODE_FLAG_PHSYNC</term>
2166 <listitem><para>
2167 Horizontal sync is active high
2168 </para></listitem>
2169 </varlistentry>
2170 <varlistentry>
2171 <term>DRM_MODE_FLAG_NHSYNC</term>
2172 <listitem><para>
2173 Horizontal sync is active low
2174 </para></listitem>
2175 </varlistentry>
2176 <varlistentry>
2177 <term>DRM_MODE_FLAG_PVSYNC</term>
2178 <listitem><para>
2179 Vertical sync is active high
2180 </para></listitem>
2181 </varlistentry>
2182 <varlistentry>
2183 <term>DRM_MODE_FLAG_NVSYNC</term>
2184 <listitem><para>
2185 Vertical sync is active low
2186 </para></listitem>
2187 </varlistentry>
2188 <varlistentry>
2189 <term>DRM_MODE_FLAG_INTERLACE</term>
2190 <listitem><para>
2191 Mode is interlaced
2192 </para></listitem>
2193 </varlistentry>
2194 <varlistentry>
2195 <term>DRM_MODE_FLAG_DBLSCAN</term>
2196 <listitem><para>
2197 Mode uses doublescan
2198 </para></listitem>
2199 </varlistentry>
2200 <varlistentry>
2201 <term>DRM_MODE_FLAG_CSYNC</term>
2202 <listitem><para>
2203 Mode uses composite sync
2204 </para></listitem>
2205 </varlistentry>
2206 <varlistentry>
2207 <term>DRM_MODE_FLAG_PCSYNC</term>
2208 <listitem><para>
2209 Composite sync is active high
2210 </para></listitem>
2211 </varlistentry>
2212 <varlistentry>
2213 <term>DRM_MODE_FLAG_NCSYNC</term>
2214 <listitem><para>
2215 Composite sync is active low
2216 </para></listitem>
2217 </varlistentry>
2218 <varlistentry>
2219 <term>DRM_MODE_FLAG_HSKEW</term>
2220 <listitem><para>
2221 hskew provided (not used?)
2222 </para></listitem>
2223 </varlistentry>
2224 <varlistentry>
2225 <term>DRM_MODE_FLAG_BCAST</term>
2226 <listitem><para>
2227 not used?
2228 </para></listitem>
2229 </varlistentry>
2230 <varlistentry>
2231 <term>DRM_MODE_FLAG_PIXMUX</term>
2232 <listitem><para>
2233 not used?
2234 </para></listitem>
2235 </varlistentry>
2236 <varlistentry>
2237 <term>DRM_MODE_FLAG_DBLCLK</term>
2238 <listitem><para>
2239 not used?
2240 </para></listitem>
2241 </varlistentry>
2242 <varlistentry>
2243 <term>DRM_MODE_FLAG_CLKDIV2</term>
2244 <listitem><para>
2245 ?
2246 </para></listitem>
2247 </varlistentry>
2248 </variablelist>
2249 </para>
2250 <para>
2251 Note that modes marked with the INTERLACE or DBLSCAN flags will be
2252 filtered out by
2253 <function>drm_helper_probe_single_connector_modes</function> if
2254 the connector's <structfield>interlace_allowed</structfield> or
2255 <structfield>doublescan_allowed</structfield> field is set to 0.
2256 </para>
2257 </listitem>
2258 <listitem>
2259 <synopsis>char name[DRM_DISPLAY_MODE_LEN];</synopsis>
2260 <para>
2261 Mode name. The driver must call
2262 <function>drm_mode_set_name</function> to fill the mode name from
2263 <structfield>hdisplay</structfield>,
2264 <structfield>vdisplay</structfield> and interlace flag after
2265 filling the corresponding fields.
2266 </para>
2267 </listitem>
2268 </itemizedlist>
2269 </para>
2270 <para>
         The <structfield>vrefresh</structfield> value is computed by
         <function>drm_helper_probe_single_connector_modes</function>.
       </para>
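      <para>
        The timing fields documented in the removed text survive unchanged in
        struct <structname>drm_display_mode</structname>. As a sketch, a
        driver-created 1024x768@60 mode using the standard VESA DMT timings
        could look like this (example_get_modes is hypothetical; the
        drm_mode_* helpers are real):
      </para>
      <programlisting><![CDATA[
static const struct drm_display_mode example_mode = {
	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	.clock = 65000,			/* pixel clock in kHz */
	.hdisplay = 1024, .hsync_start = 1048,
	.hsync_end = 1184, .htotal = 1344,
	.vdisplay = 768, .vsync_start = 771,
	.vsync_end = 777, .vtotal = 806,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};

static int example_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &example_mode);
	if (!mode)
		return 0;

	drm_mode_set_name(mode);	/* fills in the "1024x768" name */
	drm_mode_probed_add(connector, mode);
	return 1;
}
]]></programlisting>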
@@ -2327,8 +1854,12 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_atomic_helper.c
   </sect2>
   <sect2>
-    <title>Modeset Helper Functions Reference</title>
-!Iinclude/drm/drm_crtc_helper.h
+    <title>Modeset Helper Reference for Common Vtables</title>
+!Iinclude/drm/drm_modeset_helper_vtables.h
+!Pinclude/drm/drm_modeset_helper_vtables.h overview
+  </sect2>
+  <sect2>
+    <title>Legacy CRTC/Modeset Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_crtc_helper.c
 !Pdrivers/gpu/drm/drm_crtc_helper.c overview
   </sect2>
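      <para>
        A sketch of how a legacy (non-atomic) driver wires these helpers up
        (the example_* callbacks are hypothetical; drm_crtc_init,
        drm_crtc_helper_add and drm_crtc_helper_set_config are the real entry
        points):
      </para>
      <programlisting><![CDATA[
static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.dpms = example_crtc_dpms,
	.mode_fixup = example_crtc_mode_fixup,
	.prepare = example_crtc_prepare,
	.mode_set = example_crtc_mode_set,
	.commit = example_crtc_commit,
};

static const struct drm_crtc_funcs example_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,	/* helper-based modeset */
	.page_flip = example_page_flip,
	.destroy = example_crtc_destroy,
};

static int example_crtc_setup(struct drm_device *dev,
			      struct example_crtc *ecrtc)
{
	int ret;

	ret = drm_crtc_init(dev, &ecrtc->base, &example_crtc_funcs);
	if (ret)
		return ret;

	drm_crtc_helper_add(&ecrtc->base, &example_crtc_helper_funcs);
	return 0;
}
]]></programlisting>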
@@ -4201,17 +3732,21 @@ int num_ioctls;</synopsis>
     </sect2>
   </sect1>
   <sect1>
-    <title>GuC-based Command Submission</title>
+    <title>GuC</title>
     <sect2>
-      <title>GuC</title>
+      <title>GuC-specific firmware loader</title>
 !Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
 !Idrivers/gpu/drm/i915/intel_guc_loader.c
     </sect2>
     <sect2>
-      <title>GuC Client</title>
-!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
+      <title>GuC-based command submission</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
 !Idrivers/gpu/drm/i915/i915_guc_submission.c
     </sect2>
+    <sect2>
+      <title>GuC Firmware Layout</title>
+!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
+    </sect2>
   </sect1>
 
   <sect1>
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 31d1d658827f..c0d8788e75d3 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -587,7 +587,7 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
      preaction=<preaction type> preop=<preop type> start_now=x
-     nowayout=x ifnum_to_use=n
+     nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled). Note
 that the pretimeout is the time before the final timeout. So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 seconds (10 seconds before the timeout). The panic_wdt_timeout
+is the timeout value that is set on a kernel panic, in order to allow actions
+such as kdump to run during the panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
 	ipmi_watchdog.preop=<preop type>
 	ipmi_watchdog.start_now=x
 	ipmi_watchdog.nowayout=x
+	ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
 
diff --git a/Documentation/arm/keystone/Overview.txt b/Documentation/arm/keystone/Overview.txt
index f17bc4c9dff9..400c0c270d2e 100644
--- a/Documentation/arm/keystone/Overview.txt
+++ b/Documentation/arm/keystone/Overview.txt
@@ -49,24 +49,6 @@ specified through DTS. Following are the DTS used:-
 The device tree documentation for the keystone machines are located at
     Documentation/devicetree/bindings/arm/keystone/keystone.txt
 
-Known issues & workaround
--------------------------
-
-Some of the device drivers used on keystone are re-used from that from
-DaVinci and other TI SoCs. These device drivers may use clock APIs directly.
-Some of the keystone specific drivers such as netcp uses run time power
-management API instead to enable clock. As this API has limitations on
-keystone, following workaround is needed to boot Linux.
-
-   Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise
-   clock frameworks will try to disable clocks that are unused and disable
-   the hardware. This is because netcp related power domain and clock
-   domains are enabled in u-boot as run time power management API currently
-   doesn't enable clocks for netcp due to a limitation. This workaround is
-   expected to be removed in the future when proper API support becomes
-   available. Until then, this work around is needed.
-
-
 Document Author
 ---------------
 Murali Karicheri <m-karicheri2@ti.com>
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 2f6c6ff7161d..d8880ca30af4 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
    parameter.
  1: The multi-queue block layer is instantiated with a hardware dispatch
     queue for each CPU node in the system.
+
+use_lightnvm=[0/1]: Default: 0
+  Register device with LightNVM. Requires blk-mq to be used.
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index 64693f2ebc51..fe4a7a2dea9c 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -1,3 +1,20 @@
+Device-Tree bindings for the Samsung Exynos embedded DisplayPort transmitter (eDP)
+
+DisplayPort is an industry standard intended to accommodate the growing broad
+adoption of digital display technology within the PC and CE industries.
+It consolidates the internal and external connection methods to reduce device
+complexity and cost. It also supports the necessary features for important
+cross-industry applications and provides performance scalability to enable the
+next generation of displays that feature higher color depths, refresh rates,
+and display resolutions.
+
+The eDP (embedded DisplayPort) device is compliant with the Embedded
+DisplayPort standard as follows:
+- DisplayPort standard 1.1a for Exynos5250 and Exynos5260.
+- DisplayPort standard 1.3 for Exynos5422s and Exynos5800.
+
+eDP resides between FIMD and the panel, or between FIMD and a bridge such as
+LVDS.
+
 The Exynos display port interface should be configured based on
 the type of panel connected to it.
 
@@ -66,8 +83,15 @@ Optional properties for dp-controller:
 		Hotplug detect GPIO.
 			Indicates which GPIO should be used for hotplug
 			detection
-	-video interfaces: Device node can contain video interface port
-			nodes according to [1].
+Video interfaces:
+  The device node can contain video interface port nodes according to [1].
+  The following properties are specific to those nodes:
+
+  endpoint node connected to a bridge or panel node:
+   - remote-endpoint: specifies the endpoint in the panel or bridge node.
+     This node is required for all Exynos DP variants to represent the
+     connection between the DP controller and the bridge or the panel.
 
 [1]: Documentation/devicetree/bindings/media/video-interfaces.txt
 
@@ -111,9 +135,18 @@ Board Specific portion:
 	};
 
 	ports {
-		port@0 {
+		port {
 			dp_out: endpoint {
-				remote-endpoint = <&bridge_in>;
+				remote-endpoint = <&dp_in>;
+			};
+		};
+	};
+
+	panel {
+		...
+		port {
+			dp_in: endpoint {
+				remote-endpoint = <&dp_out>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index f344b9e49198..e7423bea1424 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -14,17 +14,20 @@ Required properties:
 - clocks: device clocks
   See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
 - clock-names: the following clocks are required:
+  * "mdp_core_clk"
+  * "iface_clk"
   * "bus_clk"
-  * "byte_clk"
-  * "core_clk"
   * "core_mmss_clk"
-  * "iface_clk"
-  * "mdp_core_clk"
+  * "byte_clk"
   * "pixel_clk"
+  * "core_clk"
+  For DSIv2, we need an additional clock:
+   * "src_clk"
 - vdd-supply: phandle to vdd regulator device node
 - vddio-supply: phandle to vdd-io regulator device node
 - vdda-supply: phandle to vdda regulator device node
 - qcom,dsi-phy: phandle to DSI PHY device node
+- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
 
 Optional properties:
 - panel@0: Node of panel connected to this DSI controller.
@@ -51,6 +54,7 @@ Required properties:
51 * "qcom,dsi-phy-28nm-hpm" 54 * "qcom,dsi-phy-28nm-hpm"
52 * "qcom,dsi-phy-28nm-lp" 55 * "qcom,dsi-phy-28nm-lp"
53 * "qcom,dsi-phy-20nm" 56 * "qcom,dsi-phy-20nm"
57 * "qcom,dsi-phy-28nm-8960"
54- reg: Physical base address and length of the registers of PLL, PHY and PHY 58- reg: Physical base address and length of the registers of PLL, PHY and PHY
55 regulator 59 regulator
56- reg-names: The names of register regions. The following regions are required: 60- reg-names: The names of register regions. The following regions are required:
diff --git a/Documentation/devicetree/bindings/display/msm/mdp.txt b/Documentation/devicetree/bindings/display/msm/mdp.txt
index 0833edaba4c3..a214f6cd0363 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp.txt
@@ -2,18 +2,28 @@ Qualcomm adreno/snapdragon display controller
 
 Required properties:
 - compatible:
-  * "qcom,mdp" - mdp4
+  * "qcom,mdp4" - mdp4
+  * "qcom,mdp5" - mdp5
 - reg: Physical base address and length of the controller's registers.
 - interrupts: The interrupt signal from the display controller.
 - connectors: array of phandles for output device(s)
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
-- clock-names: the following clocks are required:
-  * "core_clk"
-  * "iface_clk"
-  * "src_clk"
-  * "hdmi_clk"
-  * "mpd_clk"
+- clock-names: the following clocks are required.
+  For MDP4:
+  * "core_clk"
+  * "iface_clk"
+  * "lut_clk"
+  * "src_clk"
+  * "hdmi_clk"
+  * "mdp_clk"
+  For MDP5:
+  * "bus_clk"
+  * "iface_clk"
+  * "core_clk_src"
+  * "core_clk"
+  * "lut_clk" (some MDP5 versions may not need this)
+  * "vsync_clk"
 
 Optional properties:
 - gpus: phandle for gpu device
@@ -26,7 +36,7 @@ Example:
 	...
 
 	mdp: qcom,mdp@5100000 {
-		compatible = "qcom,mdp";
+		compatible = "qcom,mdp4";
 		reg = <0x05100000 0xf0000>;
 		interrupts = <GIC_SPI 75 0>;
 		connectors = <&hdmi>;
diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
index 0604d42f38d1..5fe9372abb37 100644
--- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt
+++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
@@ -7,6 +7,10 @@ Required properties:
 - reg: should contain G-Scaler physical address location and length.
 - interrupts: should contain G-Scaler interrupt number
 
+Optional properties:
+- samsung,sysreg: handle to syscon used to control the system registers to
+  set writeback input and destination
+
 Example:
 
 gsc_0:  gsc@0x13e00000 {
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index f5a8ca29aff0..aeea50c84e92 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -8,6 +8,11 @@ Required properties:
 - phy-mode: See ethernet.txt file in the same directory
 - clocks: a pointer to the reference clock for this device.
 
+Optional properties:
+- tx-csum-limit: maximum MTU (in bytes) for which the port supports TX
+  checksumming. If not specified, it defaults to 1600B for
+  "marvell,armada-370-neta" and 9800B for others.
+
 Example:
 
 ethernet@d0070000 {
@@ -15,6 +20,7 @@ ethernet@d0070000 {
 	reg = <0xd0070000 0x2500>;
 	interrupts = <8>;
 	clocks = <&gate_clk 4>;
+	tx-csum-limit = <9800>;
 	status = "okay";
 	phy = <&phy0>;
 	phy-mode = "rgmii-id";
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
index b38200d2583a..0dfa60d88dd3 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -1,7 +1,9 @@
 * Temperature Sensor ADC (TSADC) on rockchip SoCs
 
 Required properties:
-- compatible : "rockchip,rk3288-tsadc"
+- compatible : should be "rockchip,<name>-tsadc"
+   "rockchip,rk3288-tsadc": found on RK3288 SoCs
+   "rockchip,rk3368-tsadc": found on RK3368 SoCs
 - reg : physical base address of the controller and length of memory mapped
   region.
 - interrupts : The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 6a4b1af724f8..1bba38dd2637 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -32,6 +32,7 @@ Supported adapters:
   * Intel Sunrise Point-LP (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
+  * Intel Lewisburg (PCH)
   Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8aae632f02f..742f69d18fc8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	hwp_only
 		Only load intel_pstate on systems which support
 		hardware P state control (HWP) if available.
-	no_acpi
-		Don't use ACPI processor performance control objects
-		_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 		on	enable Interrupt Remapping (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e9caa4b28828..69c8a9c3289a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -318,7 +318,7 @@ M: Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
 W:	https://01.org/linux-acpi
 S:	Supported
-F:	drivers/acpi/video.c
+F:	drivers/acpi/acpi_video.c
 
 ACPI WMI DRIVER
 L:	platform-driver-x86@vger.kernel.org
@@ -1847,7 +1847,7 @@ S: Supported
 F:	drivers/net/wireless/ath/ath6kl/
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:	Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+M:	Maya Erez <qca_merez@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
 L:	wil6210@qca.qualcomm.com
 S:	Supported
@@ -1931,7 +1931,7 @@ S: Supported
 F:	drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M:	Josh Wu <josh.wu@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@atmel.com>
 L:	linux-media@vger.kernel.org
 S:	Supported
 F:	drivers/media/platform/soc_camera/atmel-isi.c
@@ -1950,7 +1950,8 @@ S: Supported
 F:	drivers/net/ethernet/cadence/
 
 ATMEL NAND DRIVER
-M:	Josh Wu <josh.wu@atmel.com>
+M:	Wenyou Yang <wenyou.yang@atmel.com>
+M:	Josh Wu <rainyfeeling@outlook.com>
 L:	linux-mtd@lists.infradead.org
 S:	Supported
 F:	drivers/mtd/nand/atmel_nand*
@@ -2449,7 +2450,9 @@ F: drivers/firmware/broadcom/*
 
 BROADCOM STB NAND FLASH DRIVER
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Kamal Dasu <kdasu.kdev@gmail.com>
 L:	linux-mtd@lists.infradead.org
+L:	bcm-kernel-feedback-list@broadcom.com
 S:	Maintained
 F:	drivers/mtd/nand/brcmnand/
 
@@ -2546,7 +2549,7 @@ F: arch/c6x/
 
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
-L:	linux-cachefs@redhat.com
+L:	linux-cachefs@redhat.com (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/filesystems/caching/cachefiles.txt
 F:	fs/cachefiles/
@@ -2929,10 +2932,9 @@ S: Maintained
 F:	drivers/platform/x86/compal-laptop.c
 
 CONEXANT ACCESSRUNNER USB DRIVER
-M:	Simon Arlott <cxacru@fire.lp0.eu>
 L:	accessrunner-general@lists.sourceforge.net
 W:	http://accessrunner.sourceforge.net/
-S:	Maintained
+S:	Orphan
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
@@ -4409,6 +4411,7 @@ K: fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
+R:	Moritz Fischer <moritz.fischer@ettus.com>
 S:	Maintained
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h
@@ -4559,7 +4562,7 @@ F: include/linux/frontswap.h
 
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
-L:	linux-cachefs@redhat.com
+L:	linux-cachefs@redhat.com (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/filesystems/caching/
 F:	fs/fscache/
@@ -5711,13 +5714,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:	Maintained
 F:	net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
-M:	Francois Romieu <romieu@fr.zoreil.com>
-M:	Sorbica Shieh <sorbica@icplus.com.tw>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/icplus/ipg.*
-
 IPATH DRIVER
 M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
@@ -6371,6 +6367,7 @@ F: arch/*/include/asm/pmem.h
 LIGHTNVM PLATFORM SUPPORT
 M:	Matias Bjorling <mb@lightnvm.io>
 W:	http://github/OpenChannelSSD
+L:	linux-block@vger.kernel.org
 S:	Maintained
 F:	drivers/lightnvm/
 F:	include/linux/lightnvm.h
@@ -6923,13 +6920,21 @@ F: drivers/scsi/megaraid.*
 F:	drivers/scsi/megaraid.*
 F:	drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:	Amir Vadai <amirv@mellanox.com>
+M:	Eugenia Emantayev <eugenia@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET DRIVER (mlx5e)
+M:	Saeed Mahameed <saeedm@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
+
 MELLANOX ETHERNET SWITCH DRIVERS
 M:	Jiri Pirko <jiri@mellanox.com>
 M:	Ido Schimmel <idosch@mellanox.com>
@@ -7901,6 +7906,18 @@ S: Maintained
 F:	net/openvswitch/
 F:	include/uapi/linux/openvswitch.h
 
+OPERATING PERFORMANCE POINTS (OPP)
+M:	Viresh Kumar <vireshk@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+M:	Stephen Boyd <sboyd@codeaurora.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:	drivers/base/power/opp/
+F:	include/linux/pm_opp.h
+F:	Documentation/power/opp.txt
+F:	Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -9314,7 +9331,6 @@ F: drivers/i2c/busses/i2c-designware-*
9314F: include/linux/platform_data/i2c-designware.h 9331F: include/linux/platform_data/i2c-designware.h
9315 9332
9316SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER 9333SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
9317M: Seungwon Jeon <tgih.jun@samsung.com>
9318M: Jaehoon Chung <jh80.chung@samsung.com> 9334M: Jaehoon Chung <jh80.chung@samsung.com>
9319L: linux-mmc@vger.kernel.org 9335L: linux-mmc@vger.kernel.org
9320S: Maintained 9336S: Maintained
@@ -9411,8 +9427,10 @@ F: include/scsi/sg.h
 
 SCSI SUBSYSTEM
 M:	"James E.J. Bottomley" <JBottomley@odin.com>
-L:	linux-scsi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M:	"Martin K. Petersen" <martin.petersen@oracle.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/
 F:	include/scsi/
@@ -10887,9 +10905,9 @@ S: Maintained
 F:	drivers/media/tuners/tua9001*
 
 TULIP NETWORK DRIVERS
-M:	Grant Grundler <grundler@parisc-linux.org>
 L:	netdev@vger.kernel.org
-S:	Maintained
+L:	linux-parisc@vger.kernel.org
+S:	Orphan
 F:	drivers/net/ethernet/dec/tulip/
 
 TUN/TAP driver
diff --git a/Makefile b/Makefile
index 3a0234f50f36..d644f6e92cf6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index c92c0ef1e9d2..f1ac9818b751 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index cfac24e0e7b6..323486d6ee83 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 9922a118a15a..66191cd0447e 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f761a7c70761..f68838e8068a 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index dc6f74f41283..96bd1c20fb0b 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 3fef0a210c56..fcae66683ca0 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 51784837daae..b01b659168ea 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index ef35ef3923dd..a07f20de221b 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 634509e5e572..f36c047b33ca 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index ad481c24070d..258b0e5ad332 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -37,6 +37,9 @@
 #define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
 				 (ARCV2_IRQ_DEF_PRIO << 1))
 
+/* SLEEP needs default irq priority (<=) which can interrupt the doze */
+#define ISA_SLEEP_ARG		(0x10 | ARCV2_IRQ_DEF_PRIO)
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index d8c608174617..c1d36458bfb7 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -43,6 +43,8 @@
 
 #define ISA_INIT_STATUS_BITS	STATUS_IE_MASK
 
+#define ISA_SLEEP_ARG		0x3
+
 #ifndef __ASSEMBLY__
 
 /******************************************************************
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0c76..5d446df2c413 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
58 "st sp, [r24] \n\t" 58 "st sp, [r24] \n\t"
59#endif 59#endif
60 60
61 "sync \n\t"
62
63 /* 61 /*
64 * setup _current_task with incoming tsk. 62 * setup _current_task with incoming tsk.
65 * optionally, set r25 to that as well 63 * optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e248594097e7..e6890b1f8650 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
 	 * don't need to do anything special to return it
 	 */
 
-	/* hardware memory barrier */
-	sync
-
 	/*
 	 * switch to new task, contained in r1
 	 * Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f3f7..a3f750e76b68 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
-	if (is_isa_arcompact()) {
-		__asm__("sleep 0x3");
-	} else {
-		__asm__("sleep 0x10");
-	}
+	__asm__ __volatile__(
+		"sleep %0	\n"
+		:
+		:"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */
 }
 
 asmlinkage void ret_from_fork(void);
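An aside on the constraint above, as a minimal sketch (the macro name is illustrative, not from the patch): the ARC "sleep" instruction encodes its argument inside the opcode, so the operand must be a compile-time immediate ("I" constraint); a register operand ("r") has nowhere to go. With ISA_SLEEP_ARG supplied per ISA by the headers patched earlier, both variants reduce to one pattern:

	/* Illustrative sketch only: ARG stands in for the per-ISA
	 * ISA_SLEEP_ARG (0x3 on ARCompact, 0x10 | ARCV2_IRQ_DEF_PRIO
	 * on ARCv2). "I" asks the compiler for an integer immediate,
	 * which is folded into the sleep opcode at build time. */
	#define ARC_SLEEP(arg) \
		__asm__ __volatile__("sleep %0\n" : : "I"(arg))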
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..7352475451f6 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame)
 							    (const u8 *)(fde +
 									 1) +
 							    *fde, ptrType);
-				if (pc >= endLoc)
+				if (pc >= endLoc) {
 					fde = NULL;
-			} else
-				fde = NULL;
-		}
-		if (fde == NULL) {
-			for (fde = table->address, tableSize = table->size;
-			     cie = NULL, tableSize > sizeof(*fde)
-			     && tableSize - sizeof(*fde) >= *fde;
-			     tableSize -= sizeof(*fde) + *fde,
-			     fde += 1 + *fde / sizeof(*fde)) {
-				cie = cie_for_fde(fde, table);
-				if (cie == &bad_cie) {
 					cie = NULL;
-					break;
 				}
-				if (cie == NULL
-				    || cie == &not_fde
-				    || (ptrType = fde_pointer_type(cie)) < 0)
-					continue;
-				ptr = (const u8 *)(fde + 2);
-				startLoc = read_pointer(&ptr,
-							(const u8 *)(fde + 1) +
-							*fde, ptrType);
-				if (!startLoc)
-					continue;
-				if (!(ptrType & DW_EH_PE_indirect))
-					ptrType &=
-					    DW_EH_PE_FORM | DW_EH_PE_signed;
-				endLoc =
-				    startLoc + read_pointer(&ptr,
-							    (const u8 *)(fde +
-									 1) +
-							    *fde, ptrType);
-				if (pc >= startLoc && pc < endLoc)
-					break;
+			} else {
+				fde = NULL;
+				cie = NULL;
 			}
 		}
 	}
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 0ee739846847..daf2bf52b984 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 
 	int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
 	if (dirty) {
-		/* wback + inv dcache lines */
+		/* wback + inv dcache lines (K-mapping) */
 		__flush_dcache_page(paddr, paddr);
 
-		/* invalidate any existing icache lines */
+		/* invalidate any existing icache lines (U-mapping) */
 		if (vma->vm_flags & VM_EXEC)
 			__inv_icache_page(paddr, vaddr);
 	}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0365cbbc9179..34e1569a11ee 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -76,6 +76,8 @@ config ARM
 	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
+	select OF_EARLY_FLATTREE if OF
+	select OF_RESERVED_MEM if OF
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select PERF_USE_VMALLOC
@@ -1822,8 +1824,6 @@ config USE_OF
 	bool "Flattened Device Tree support"
 	select IRQ_DOMAIN
 	select OF
-	select OF_EARLY_FLATTREE
-	select OF_RESERVED_MEM
 	help
 	  Include support for flattened device tree machine descriptions.
 
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index d9ba6b879fc1..00352e761b8c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -604,6 +604,7 @@
 	reg = <0x6f>;
 	interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
 			      <&dra7_pmx_core 0x424>;
+	interrupt-names = "irq", "wakeup";
 
 	pinctrl-names = "default";
 	pinctrl-0 = <&mcp79410_pins_default>;
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts
index 4e0ad3b82796..0962f2fa3f6e 100644
--- a/arch/arm/boot/dts/animeo_ip.dts
+++ b/arch/arm/boot/dts/animeo_ip.dts
@@ -155,21 +155,21 @@
 			label = "keyswitch_in";
 			gpios = <&pioB 1 GPIO_ACTIVE_HIGH>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		error_in {
 			label = "error_in";
 			gpios = <&pioB 2 GPIO_ACTIVE_HIGH>;
 			linux,code = <29>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		btn {
 			label = "btn";
 			gpios = <&pioC 23 GPIO_ACTIVE_HIGH>;
 			linux,code = <31>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index c6a0e9d7f1a9..e8b7f6726772 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -498,6 +498,7 @@
498 reg = <0x70000 0x4000>; 498 reg = <0x70000 0x4000>;
499 interrupts-extended = <&mpic 8>; 499 interrupts-extended = <&mpic 8>;
500 clocks = <&gateclk 4>; 500 clocks = <&gateclk 4>;
501 tx-csum-limit = <9800>;
501 status = "disabled"; 502 status = "disabled";
502 }; 503 };
503 504
diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts
index f89598af4c2b..6bf873e7d96c 100644
--- a/arch/arm/boot/dts/at91-foxg20.dts
+++ b/arch/arm/boot/dts/at91-foxg20.dts
@@ -159,7 +159,7 @@
 			label = "Button";
 			gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
 			linux,code = <0x103>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index bf18ece0c027..229e989eb60d 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -24,15 +24,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		main_xtal {
 			clock-frequency = <18432000>;
 		};
@@ -94,14 +85,14 @@
 			label = "PB_RST";
 			gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		user {
 			label = "PB_USER";
 			gpios = <&pioB 31 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x101>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts
index f0b1563cb3f1..50a14568f094 100644
--- a/arch/arm/boot/dts/at91-kizbox2.dts
+++ b/arch/arm/boot/dts/at91-kizbox2.dts
@@ -171,21 +171,21 @@
 			label = "PB_PROG";
 			gpios = <&pioE 27 GPIO_ACTIVE_LOW>;
 			linux,code = <0x102>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		reset {
 			label = "PB_RST";
 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		user {
 			label = "PB_USER";
 			gpios = <&pioE 31 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x101>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts
index 9f72b4932634..9682d105d4d8 100644
--- a/arch/arm/boot/dts/at91-kizboxmini.dts
+++ b/arch/arm/boot/dts/at91-kizboxmini.dts
@@ -98,14 +98,14 @@
 			label = "PB_PROG";
 			gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
 			linux,code = <0x102>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		reset {
 			label = "PB_RST";
 			gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts
index a9aef53ab764..4f2eebf4a560 100644
--- a/arch/arm/boot/dts/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/at91-qil_a9260.dts
@@ -183,7 +183,7 @@
 			label = "user_pb";
 			gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index e07c2b206beb..ad6de73ed5a5 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -45,6 +45,7 @@
 /dts-v1/;
 #include "sama5d2.dtsi"
 #include "sama5d2-pinfunc.h"
+#include <dt-bindings/mfd/atmel-flexcom.h>
 
 / {
 	model = "Atmel SAMA5D2 Xplained";
@@ -59,15 +60,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -91,6 +83,22 @@
 		status = "okay";
 	};
 
+	sdmmc0: sdio-host@a0000000 {
+		bus-width = <8>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_sdmmc0_default>;
+		non-removable;
+		mmc-ddr-1_8v;
+		status = "okay";
+	};
+
+	sdmmc1: sdio-host@b0000000 {
+		bus-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_sdmmc1_default>;
+		status = "okay"; /* conflict with qspi0 */
+	};
+
 	apb {
 		spi0: spi@f8000000 {
 			pinctrl-names = "default";
@@ -181,12 +189,49 @@
 			};
 		};
 
+		flx0: flexcom@f8034000 {
+			atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+			status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
+
+			uart5: serial@200 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0x200 0x200>;
+				interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&flx0_clk>;
+				clock-names = "usart";
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_flx0_default>;
+				atmel,fifo-size = <32>;
+				status = "okay";
+			};
+		};
+
 		uart3: serial@fc008000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_uart3_default>;
 			status = "okay";
 		};
 
+		flx4: flexcom@fc018000 {
+			atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+			status = "okay";
+
+			i2c2: i2c@600 {
+				compatible = "atmel,sama5d2-i2c";
+				reg = <0x600 0x200>;
+				interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <0>, <0>;
+				dma-names = "tx", "rx";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				clocks = <&flx4_clk>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_flx4_default>;
+				atmel,fifo-size = <16>;
+				status = "okay";
+			};
+		};
+
 		i2c1: i2c@fc028000 {
 			dmas = <0>, <0>;
 			pinctrl-names = "default";
@@ -201,6 +246,18 @@
 		};
 
 		pinctrl@fc038000 {
+			pinctrl_flx0_default: flx0_default {
+				pinmux = <PIN_PB28__FLEXCOM0_IO0>,
+					 <PIN_PB29__FLEXCOM0_IO1>;
+				bias-disable;
+			};
+
+			pinctrl_flx4_default: flx4_default {
+				pinmux = <PIN_PD12__FLEXCOM4_IO0>,
+					 <PIN_PD13__FLEXCOM4_IO1>;
+				bias-disable;
+			};
+
 			pinctrl_i2c0_default: i2c0_default {
 				pinmux = <PIN_PD21__TWD0>,
 					 <PIN_PD22__TWCK0>;
@@ -227,6 +284,46 @@
 				bias-disable;
 			};
 
+			pinctrl_sdmmc0_default: sdmmc0_default {
+				cmd_data {
+					pinmux = <PIN_PA1__SDMMC0_CMD>,
+						 <PIN_PA2__SDMMC0_DAT0>,
+						 <PIN_PA3__SDMMC0_DAT1>,
+						 <PIN_PA4__SDMMC0_DAT2>,
+						 <PIN_PA5__SDMMC0_DAT3>,
+						 <PIN_PA6__SDMMC0_DAT4>,
+						 <PIN_PA7__SDMMC0_DAT5>,
+						 <PIN_PA8__SDMMC0_DAT6>,
+						 <PIN_PA9__SDMMC0_DAT7>;
+					bias-pull-up;
+				};
+
+				ck_cd_rstn_vddsel {
+					pinmux = <PIN_PA0__SDMMC0_CK>,
+						 <PIN_PA10__SDMMC0_RSTN>,
+						 <PIN_PA11__SDMMC0_VDDSEL>,
+						 <PIN_PA13__SDMMC0_CD>;
+					bias-disable;
+				};
+			};
+
+			pinctrl_sdmmc1_default: sdmmc1_default {
+				cmd_data {
+					pinmux = <PIN_PA28__SDMMC1_CMD>,
+						 <PIN_PA18__SDMMC1_DAT0>,
+						 <PIN_PA19__SDMMC1_DAT1>,
+						 <PIN_PA20__SDMMC1_DAT2>,
+						 <PIN_PA21__SDMMC1_DAT3>;
+					bias-pull-up;
+				};
+
+				conf-ck_cd {
+					pinmux = <PIN_PA22__SDMMC1_CK>,
+						 <PIN_PA30__SDMMC1_CD>;
+					bias-disable;
+				};
+			};
+
 			pinctrl_spi0_default: spi0_default {
 				pinmux = <PIN_PA14__SPI0_SPCK>,
 					 <PIN_PA15__SPI0_MOSI>,
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 8488ac53d22d..ff888d21c786 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -315,7 +315,7 @@
 			label = "PB_USER";
 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
 			linux,code = <0x104>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 45371a1b61b3..131614f28e75 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -50,7 +50,6 @@
 	compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5";
 
 	chosen {
-		bootargs = "ignore_loglevel earlyprintk";
 		stdout-path = "serial0:115200n8";
 	};
 
@@ -59,15 +58,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -235,7 +225,7 @@
 			label = "pb_user1";
 			gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 6d272c0125e3..2d4a33100af6 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -50,7 +50,6 @@
 	compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5";
 
 	chosen {
-		bootargs = "ignore_loglevel earlyprintk";
 		stdout-path = "serial0:115200n8";
 	};
 
@@ -59,15 +58,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -304,7 +294,7 @@
 			label = "pb_user1";
 			gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts
index 8dab4b75ca97..f90e1c2d3caa 100644
--- a/arch/arm/boot/dts/at91rm9200ek.dts
+++ b/arch/arm/boot/dts/at91rm9200ek.dts
@@ -21,15 +21,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index 2e92ac020f23..55bd51f07fa6 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -22,15 +22,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -149,7 +140,7 @@
 			ti,debounce-tol = /bits/ 16 <65535>;
 			ti,debounce-max = /bits/ 16 <1>;
 
-			linux,wakeup;
+			wakeup-source;
 		};
 	};
 
@@ -193,28 +184,28 @@
 			label = "button_0";
 			gpios = <&pioA 27 GPIO_ACTIVE_LOW>;
 			linux,code = <256>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button_1 {
 			label = "button_1";
 			gpios = <&pioA 26 GPIO_ACTIVE_LOW>;
 			linux,code = <257>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button_2 {
 			label = "button_2";
 			gpios = <&pioA 25 GPIO_ACTIVE_LOW>;
 			linux,code = <258>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button_3 {
 			label = "button_3";
 			gpios = <&pioA 24 GPIO_ACTIVE_LOW>;
 			linux,code = <259>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 23381276ffb8..59df9d73d276 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -22,15 +22,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <16367660>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -213,14 +204,14 @@
 			label = "left_click";
 			gpios = <&pioC 5 GPIO_ACTIVE_LOW>;
 			linux,code = <272>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		right_click {
 			label = "right_click";
 			gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
 			linux,code = <273>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 57548a2c5a1e..e9cc99b6353a 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -19,15 +19,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -206,14 +197,14 @@
 			label = "Button 3";
 			gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
 			linux,code = <0x103>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		btn4 {
 			label = "Button 4";
 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
 			linux,code = <0x104>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 9d16ef8453c5..2400c99134f7 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -24,15 +24,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -323,14 +314,14 @@
 			label = "left_click";
 			gpios = <&pioB 6 GPIO_ACTIVE_LOW>;
 			linux,code = <272>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		right_click {
 			label = "right_click";
 			gpios = <&pioB 7 GPIO_ACTIVE_LOW>;
 			linux,code = <273>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		left {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index acf3451a332d..ca4ddf86817a 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -23,15 +23,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <16000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -219,7 +210,7 @@
 			label = "Enter";
 			gpios = <&pioB 3 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index 558c9f220bed..f10566f759cd 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -22,15 +22,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -225,14 +216,14 @@
 			label = "right_click";
 			gpios = <&pioB 0 GPIO_ACTIVE_LOW>;
 			linux,code = <273>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		left_click {
 			label = "left_click";
 			gpios = <&pioB 1 GPIO_ACTIVE_LOW>;
 			linux,code = <272>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index 26112ebd15fc..b098ad8cd93a 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -13,17 +13,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-	};
-
-	clocks {
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index bc672fb91466..fe99231cbde5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1459,8 +1459,8 @@
 			interrupt-names = "tx", "rx";
 			dmas = <&sdma_xbar 133>, <&sdma_xbar 132>;
 			dma-names = "tx", "rx";
-			clocks = <&mcasp3_ahclkx_mux>;
-			clock-names = "fck";
+			clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 49a4f43e5ac2..1cc2e95ffc66 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -122,6 +122,12 @@
 		compatible = "auo,b133htn01";
 		power-supply = <&tps65090_fet6>;
 		backlight = <&backlight>;
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&dp_out>;
+			};
+		};
 	};
 
 	mmc1_pwrseq: mmc1_pwrseq {
@@ -148,7 +154,14 @@
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <2>;
 	samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
-	panel = <&panel>;
+
+	ports {
+		port {
+			dp_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
+		};
+	};
 };
 
 &fimd {
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index feb9d34b239c..f818ea483aeb 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -486,7 +486,10 @@
 			compatible = "fsl,imx27-usb";
 			reg = <0x10024000 0x200>;
 			interrupts = <56>;
-			clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+			clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+				 <&clks IMX27_CLK_USB_AHB_GATE>,
+				 <&clks IMX27_CLK_USB_DIV>;
+			clock-names = "ipg", "ahb", "per";
 			fsl,usbmisc = <&usbmisc 0>;
 			status = "disabled";
 		};
@@ -495,7 +498,10 @@
 			compatible = "fsl,imx27-usb";
 			reg = <0x10024200 0x200>;
 			interrupts = <54>;
-			clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+			clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+				 <&clks IMX27_CLK_USB_AHB_GATE>,
+				 <&clks IMX27_CLK_USB_DIV>;
+			clock-names = "ipg", "ahb", "per";
 			fsl,usbmisc = <&usbmisc 1>;
 			dr_mode = "host";
 			status = "disabled";
@@ -505,7 +511,10 @@
 			compatible = "fsl,imx27-usb";
 			reg = <0x10024400 0x200>;
 			interrupts = <55>;
-			clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+			clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+				 <&clks IMX27_CLK_USB_AHB_GATE>,
+				 <&clks IMX27_CLK_USB_DIV>;
+			clock-names = "ipg", "ahb", "per";
 			fsl,usbmisc = <&usbmisc 2>;
 			dr_mode = "host";
 			status = "disabled";
@@ -515,7 +524,6 @@
 			#index-cells = <1>;
 			compatible = "fsl,imx27-usbmisc";
 			reg = <0x10024600 0x200>;
-			clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
 		};
 
 		sahara2: sahara@10025000 {
diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/k2l-netcp.dtsi
index 01aef230773d..5acbd0dcc2ab 100644
--- a/arch/arm/boot/dts/k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/k2l-netcp.dtsi
@@ -137,7 +137,7 @@ netcp: netcp@26000000 {
 	/* NetCP address range */
 	ranges = <0 0x26000000 0x1000000>;
 
-	clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
+	clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>;
 	dma-coherent;
 
 	ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
index c56ab6bbfe3c..0e46560551f4 100644
--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -40,7 +40,7 @@
 	};
 	poweroff@12100 {
 		compatible = "qnap,power-off";
-		reg = <0x12000 0x100>;
+		reg = <0x12100 0x100>;
 		clocks = <&gate_clk 7>;
 	};
 	spi@10600 {
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index 8fd8ef2c72da..85f0373df498 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -86,6 +86,10 @@
 	};
 };
 
+&emmc {
+	/delete-property/mmc-hs200-1_8v;
+};
+
 &gpio_keys {
 	pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
 
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 6a79c9c526b8..04ea209f1737 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -452,8 +452,10 @@
 			clock-names = "tsadc", "apb_pclk";
 			resets = <&cru SRST_TSADC>;
 			reset-names = "tsadc-apb";
-			pinctrl-names = "default";
-			pinctrl-0 = <&otp_out>;
+			pinctrl-names = "init", "default", "sleep";
+			pinctrl-0 = <&otp_gpio>;
+			pinctrl-1 = <&otp_out>;
+			pinctrl-2 = <&otp_gpio>;
 			#thermal-sensor-cells = <1>;
 			rockchip,hw-tshut-temp = <95000>;
 			status = "disabled";
@@ -1395,6 +1397,10 @@
 		};
 
 		tsadc {
+			otp_gpio: otp-gpio {
+				rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
+
 			otp_out: otp-out {
 				rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
 			};
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index d9a9aca1ccfd..e812f5c1bf70 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -49,7 +49,7 @@
 			label = "pb_user1";
 			gpios = <&pioE 27 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 15bbaf690047..2193637b9cd2 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1300,7 +1300,7 @@
 			};
 
 			watchdog@fc068640 {
-				compatible = "atmel,at91sam9260-wdt";
+				compatible = "atmel,sama5d4-wdt";
 				reg = <0xfc068640 0x10>;
 				clocks = <&clk32k>;
 				status = "disabled";
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi
index 12edafefd44a..9beea8976584 100644
--- a/arch/arm/boot/dts/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9260_common.dtsi
@@ -115,7 +115,7 @@
 			label = "user_pb";
 			gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index 68c0de36c339..8cc6edb29694 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -143,7 +143,7 @@
 			label = "user_pb";
 			gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 6736bae43a5b..0d5acc2cdc8e 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -158,7 +158,7 @@
 			interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks VF610_CLK_DSPI0>;
 			clock-names = "dspi";
-			spi-num-chipselects = <5>;
+			spi-num-chipselects = <6>;
 			status = "disabled";
 		};
 
@@ -170,7 +170,7 @@
 			interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks VF610_CLK_DSPI1>;
 			clock-names = "dspi";
-			spi-num-chipselects = <5>;
+			spi-num-chipselects = <4>;
 			status = "disabled";
 		};
 
@@ -461,6 +461,8 @@
 			clock-names = "adc";
 			#io-channel-cells = <1>;
 			status = "disabled";
+			fsl,adck-max-frequency = <30000000>, <40000000>,
+						 <20000000>;
 		};
 
 		esdhc0: esdhc@400b1000 {
@@ -472,8 +474,6 @@
 				 <&clks VF610_CLK_ESDHC0>;
 			clock-names = "ipg", "ahb", "per";
 			status = "disabled";
-			fsl,adck-max-frequency = <30000000>, <40000000>,
-					<20000000>;
 		};
 
 		esdhc1: esdhc@400b2000 {
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 1b1e5acd76e2..e4b1be66b3f5 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
-CONFIG_SSB=m
 CONFIG_MFD_ATMEL_HLCDC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index a0c57ac88b27..63f7e6ce649a 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
 # CONFIG_HWMON is not set
-CONFIG_SSB=m
 CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index be1d07d59ee9..1bd9510de1b9 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
 #endif
 
+static inline int nr_legacy_irqs(void)
+{
+	return NR_IRQS_LEGACY;
+}
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a9c80a2ea1a7..3095df091ff8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
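As a hedged illustration of the accessor pattern introduced here (the helper below is hypothetical, not part of the patch): callers stop forming *vcpu_reg(...) lvalues and go through the wrappers instead, a conversion the mmio.c and psci.c hunks further down perform mechanically.

	/* Hypothetical caller: copy one guest GPR to another via the
	 * new wrappers rather than dereferencing vcpu_reg() directly. */
	static inline void copy_gpr(struct kvm_vcpu *vcpu, u8 dst, u8 src)
	{
		vcpu_set_reg(vcpu, dst, vcpu_get_reg(vcpu, src));
	}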
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 7a2a32a1d5a8..ede692ffa32e 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -416,6 +416,7 @@
 #define __NR_execveat			(__NR_SYSCALL_BASE+387)
 #define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
 #define __NR_membarrier			(__NR_SYSCALL_BASE+389)
+#define __NR_mlock2			(__NR_SYSCALL_BASE+390)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 6551d28c27e6..066f7f9ba411 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -17,11 +17,6 @@
 #include <asm/mach/pci.h>
 
 static int debug_pci;
-static resource_size_t (*align_resource)(struct pci_dev *dev,
-					 const struct resource *res,
-					 resource_size_t start,
-					 resource_size_t size,
-					 resource_size_t align) = NULL;
 
 /*
  * We can't use pci_get_device() here since we are
@@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
-		align_resource = hw->align_resource;
 		INIT_LIST_HEAD(&sys->resources);
 
 		if (hw->private_data)
@@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
+			struct pci_host_bridge *host_bridge;
+
 			ret = pcibios_init_resources(nr, sys);
 			if (ret)  {
 				kfree(sys);
@@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 			busnr = sys->bus->busn_res.end + 1;
 
 			list_add(&sys->node, head);
+
+			host_bridge = pci_find_host_bridge(sys->bus);
+			host_bridge->align_resource = hw->align_resource;
 		} else {
 			kfree(sys);
 			if (ret < 0)
@@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 {
 	struct pci_dev *dev = data;
 	resource_size_t start = res->start;
+	struct pci_host_bridge *host_bridge;
 
 	if (res->flags & IORESOURCE_IO && start & 0x300)
 		start = (start + 0x3ff) & ~0x3ff;
 
 	start = (start + align - 1) & ~(align - 1);
 
-	if (align_resource)
-		return align_resource(dev, res, start, size, align);
+	host_bridge = pci_find_host_bridge(dev->bus);
+
+	if (host_bridge->align_resource)
+		return host_bridge->align_resource(dev, res,
+				start, size, align);
 
 	return start;
 }
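For context, a sketch of how a host-controller driver reaches this path after the change (the driver name and the 4K policy below are invented for illustration): the callback still arrives via struct hw_pci, but pcibios_init_hw() now parks it on the matching pci_host_bridge, so two controllers with different callbacks no longer clobber a single file-scope pointer.

	/* Hypothetical controller driver: per-bridge alignment hook. */
	static resource_size_t my_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
	{
		return ALIGN(start, SZ_4K);	/* e.g. force 4K BAR alignment */
	}

	static struct hw_pci my_pci __initdata = {
		.nr_controllers	= 1,
		.align_resource	= my_align_resource,
	};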
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index fde6c88d560c..ac368bb068d1 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,6 +399,7 @@
 		CALL(sys_execveat)
 		CALL(sys_userfaultfd)
 		CALL(sys_membarrier)
+		CALL(sys_mlock2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			vcpu_sleep(vcpu);
 
 		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
-		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..3a10c9f1d0a4 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..61d96a645ff3 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 		/* No need to invalidate the cache for device mappings */
-		if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 			kvm_flush_dcache_pte(old_pte);
 
 		put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
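A short restatement of the predicate now shared by both converted call sites (the wrapper below is a sketch, not code from the patch): a pfn with no struct page backing (pfn_valid() false) is treated as device memory and skipped, replacing the earlier test against stage-2 PTE attribute bits; what matters for cache maintenance is the host property of the backing page, not how the guest mapping happens to be attributed.

	/* Sketch: the test both converted call sites now evaluate. */
	static bool should_flush_dcache(pte_t pte)
	{
		return !pte_none(pte) && !kvm_is_device_pfn(pte_pfn(pte));
	}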
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0b556968a6da..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general puspose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
 
diff --git a/arch/arm/mach-dove/include/mach/entry-macro.S b/arch/arm/mach-dove/include/mach/entry-macro.S
index 72d622baaad3..df1d44bdc375 100644
--- a/arch/arm/mach-dove/include/mach/entry-macro.S
+++ b/arch/arm/mach-dove/include/mach/entry-macro.S
@@ -18,13 +18,13 @@
 	@ check low interrupts
 	ldr	\irqstat, [\base, #IRQ_CAUSE_LOW_OFF]
 	ldr	\tmp, [\base, #IRQ_MASK_LOW_OFF]
-	mov	\irqnr, #31
+	mov	\irqnr, #32
 	ands	\irqstat, \irqstat, \tmp
 
 	@ if no low interrupts set, check high interrupts
 	ldreq	\irqstat, [\base, #IRQ_CAUSE_HIGH_OFF]
 	ldreq	\tmp, [\base, #IRQ_MASK_HIGH_OFF]
-	moveq	\irqnr, #63
+	moveq	\irqnr, #64
 	andeqs	\irqstat, \irqstat, \tmp
 
 	@ find first active interrupt source
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 8e7976a4c3e7..cfc696b972f3 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = {
177 .irq_unmask = imx_gpc_irq_unmask, 177 .irq_unmask = imx_gpc_irq_unmask,
178 .irq_retrigger = irq_chip_retrigger_hierarchy, 178 .irq_retrigger = irq_chip_retrigger_hierarchy,
179 .irq_set_wake = imx_gpc_irq_set_wake, 179 .irq_set_wake = imx_gpc_irq_set_wake,
180 .irq_set_type = irq_chip_set_type_parent,
180#ifdef CONFIG_SMP 181#ifdef CONFIG_SMP
181 .irq_set_affinity = irq_chip_set_affinity_parent, 182 .irq_set_affinity = irq_chip_set_affinity_parent,
182#endif 183#endif
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 5305ec7341ec..79e1f876d1c9 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
143 * Ensure that CPU power state is set to ON to avoid CPU 143 * Ensure that CPU power state is set to ON to avoid CPU
144 * powerdomain transition on wfi 144 * powerdomain transition on wfi
145 */ 145 */
146 clkdm_wakeup(cpu1_clkdm); 146 clkdm_wakeup_nolock(cpu1_clkdm);
147 omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON); 147 pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
148 clkdm_allow_idle(cpu1_clkdm); 148 clkdm_allow_idle_nolock(cpu1_clkdm);
149 149
150 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) { 150 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
151 while (gic_dist_disabled()) { 151 while (gic_dist_disabled()) {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cc8a987149e2..48495ad82aba 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
 	return ret;
 }
 
+static void _enable_optional_clocks(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_opt_clk *oc;
+	int i;
+
+	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
+
+	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+		if (oc->_clk) {
+			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
+				 __clk_get_name(oc->_clk));
+			clk_enable(oc->_clk);
+		}
+}
+
+static void _disable_optional_clocks(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_opt_clk *oc;
+	int i;
+
+	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
+
+	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+		if (oc->_clk) {
+			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
+				 __clk_get_name(oc->_clk));
+			clk_disable(oc->_clk);
+		}
+}
+
 /**
  * _enable_clocks - enable hwmod main clock and interface clocks
  * @oh: struct omap_hwmod *
@@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
 			clk_enable(os->_clk);
 	}
 
+	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+		_enable_optional_clocks(oh);
+
 	/* The opt clocks are controlled by the device driver. */
 
 	return 0;
@@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
948 clk_disable(os->_clk); 981 clk_disable(os->_clk);
949 } 982 }
950 983
984 if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
985 _disable_optional_clocks(oh);
986
951 /* The opt clocks are controlled by the device driver. */ 987 /* The opt clocks are controlled by the device driver. */
952 988
953 return 0; 989 return 0;
954} 990}
955 991
956static void _enable_optional_clocks(struct omap_hwmod *oh)
957{
958 struct omap_hwmod_opt_clk *oc;
959 int i;
960
961 pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
962
963 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
964 if (oc->_clk) {
965 pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
966 __clk_get_name(oc->_clk));
967 clk_enable(oc->_clk);
968 }
969}
970
971static void _disable_optional_clocks(struct omap_hwmod *oh)
972{
973 struct omap_hwmod_opt_clk *oc;
974 int i;
975
976 pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
977
978 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
979 if (oc->_clk) {
980 pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
981 __clk_get_name(oc->_clk));
982 clk_disable(oc->_clk);
983 }
984}
985
986/** 992/**
987 * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 993 * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
988 * @oh: struct omap_hwmod * 994 * @oh: struct omap_hwmod *
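In the omap_hwmod.c hunks, _enable_optional_clocks() and _disable_optional_clocks() move, unchanged, above _enable_clocks()/_disable_clocks() so they can be called there without forward declarations, and hwmods flagged HWMOD_OPT_CLKS_NEEDED now get their optional clocks toggled together with the main and interface clocks instead of leaving them entirely to the driver. A self-contained sketch of that flag-gated walk (the types and names below are simplified stand-ins for the hwmod structures):

/* Simplified model of the HWMOD_OPT_CLKS_NEEDED gating added above. */
#include <stdio.h>

#define HWMOD_OPT_CLKS_NEEDED (1 << 14)

struct opt_clk { const char *role; int enabled; };

struct hwmod {
        const char *name;
        unsigned int flags;
        struct opt_clk *opt_clks;
        int opt_clks_cnt;
};

static void enable_optional_clocks(struct hwmod *oh)
{
        for (int i = 0; i < oh->opt_clks_cnt; i++) {
                oh->opt_clks[i].enabled = 1;
                printf("%s: enable %s\n", oh->name, oh->opt_clks[i].role);
        }
}

static void enable_clocks(struct hwmod *oh)
{
        /* main and interface clocks would be enabled here ... */
        if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
                enable_optional_clocks(oh);
        /* otherwise the opt clocks stay under driver control */
}

int main(void)
{
        struct opt_clk clks[] = { { "ahclkx", 0 } };
        struct hwmod mcasp3 = { "mcasp3", HWMOD_OPT_CLKS_NEEDED, clks, 1 };

        enable_clocks(&mcasp3);
        return 0;
}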
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index ca6df1a73475..76bce11c85a4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm {
523 * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up 523 * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up
524 * events by calling _reconfigure_io_chain() when a device is enabled 524 * events by calling _reconfigure_io_chain() when a device is enabled
525 * or idled. 525 * or idled.
526 * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
527 * operate and they need to be handled at the same time as the main_clk.
526 */ 528 */
527#define HWMOD_SWSUP_SIDLE (1 << 0) 529#define HWMOD_SWSUP_SIDLE (1 << 0)
528#define HWMOD_SWSUP_MSTANDBY (1 << 1) 530#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm {
538#define HWMOD_FORCE_MSTANDBY (1 << 11) 540#define HWMOD_FORCE_MSTANDBY (1 << 11)
539#define HWMOD_SWSUP_SIDLE_ACT (1 << 12) 541#define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
540#define HWMOD_RECONFIG_IO_CHAIN (1 << 13) 542#define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
543#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
541 544
542/* 545/*
543 * omap_hwmod._int_flags definitions 546 * omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 51d1ecb384bd..ee4e04434a94 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1298,6 +1298,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = {
1298}; 1298};
1299 1299
1300/* 1300/*
1301 * 'mcasp' class
1302 *
1303 */
1304static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = {
1305 .sysc_offs = 0x0004,
1306 .sysc_flags = SYSC_HAS_SIDLEMODE,
1307 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
1308 .sysc_fields = &omap_hwmod_sysc_type3,
1309};
1310
1311static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = {
1312 .name = "mcasp",
1313 .sysc = &dra7xx_mcasp_sysc,
1314};
1315
1316/* mcasp3 */
1317static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = {
1318 { .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" },
1319};
1320
1321static struct omap_hwmod dra7xx_mcasp3_hwmod = {
1322 .name = "mcasp3",
1323 .class = &dra7xx_mcasp_hwmod_class,
1324 .clkdm_name = "l4per2_clkdm",
1325 .main_clk = "mcasp3_aux_gfclk_mux",
1326 .flags = HWMOD_OPT_CLKS_NEEDED,
1327 .prcm = {
1328 .omap4 = {
1329 .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET,
1330 .context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET,
1331 .modulemode = MODULEMODE_SWCTRL,
1332 },
1333 },
1334 .opt_clks = mcasp3_opt_clks,
1335 .opt_clks_cnt = ARRAY_SIZE(mcasp3_opt_clks),
1336};
1337
1338/*
1301 * 'mmc' class 1339 * 'mmc' class
1302 * 1340 *
1303 */ 1341 */
@@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
2566 .user = OCP_USER_MPU | OCP_USER_SDMA, 2604 .user = OCP_USER_MPU | OCP_USER_SDMA,
2567}; 2605};
2568 2606
2607/* l4_per2 -> mcasp3 */
2608static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = {
2609 .master = &dra7xx_l4_per2_hwmod,
2610 .slave = &dra7xx_mcasp3_hwmod,
2611 .clk = "l4_root_clk_div",
2612 .user = OCP_USER_MPU | OCP_USER_SDMA,
2613};
2614
2615/* l3_main_1 -> mcasp3 */
2616static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = {
2617 .master = &dra7xx_l3_main_1_hwmod,
2618 .slave = &dra7xx_mcasp3_hwmod,
2619 .clk = "l3_iclk_div",
2620 .user = OCP_USER_MPU | OCP_USER_SDMA,
2621};
2622
2569/* l4_per1 -> elm */ 2623/* l4_per1 -> elm */
2570static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = { 2624static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
2571 .master = &dra7xx_l4_per1_hwmod, 2625 .master = &dra7xx_l4_per1_hwmod,
@@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
3308 &dra7xx_l4_wkup__dcan1, 3362 &dra7xx_l4_wkup__dcan1,
3309 &dra7xx_l4_per2__dcan2, 3363 &dra7xx_l4_per2__dcan2,
3310 &dra7xx_l4_per2__cpgmac0, 3364 &dra7xx_l4_per2__cpgmac0,
3365 &dra7xx_l4_per2__mcasp3,
3366 &dra7xx_l3_main_1__mcasp3,
3311 &dra7xx_gmac__mdio, 3367 &dra7xx_gmac__mdio,
3312 &dra7xx_l4_cfg__dma_system, 3368 &dra7xx_l4_cfg__dma_system,
3313 &dra7xx_l3_main_1__dss, 3369 &dra7xx_l3_main_1__dss,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index b1288f56d509..6256052893ec 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = {
144 .name = "l4_ls", 144 .name = "l4_ls",
145 .clkdm_name = "alwon_l3s_clkdm", 145 .clkdm_name = "alwon_l3s_clkdm",
146 .class = &l4_hwmod_class, 146 .class = &l4_hwmod_class,
147 .flags = HWMOD_NO_IDLEST,
147}; 148};
148 149
149/* 150/*
@@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = {
155 .name = "l4_hs", 156 .name = "l4_hs",
156 .clkdm_name = "alwon_l3_med_clkdm", 157 .clkdm_name = "alwon_l3_med_clkdm",
157 .class = &l4_hwmod_class, 158 .class = &l4_hwmod_class,
159 .flags = HWMOD_NO_IDLEST,
158}; 160};
159 161
160/* L3 slow -> L4 ls peripheral interface running at 125MHz */ 162/* L3 slow -> L4 ls peripheral interface running at 125MHz */
@@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = {
850 .name = "emac0", 852 .name = "emac0",
851 .clkdm_name = "alwon_ethernet_clkdm", 853 .clkdm_name = "alwon_ethernet_clkdm",
852 .class = &dm816x_emac_hwmod_class, 854 .class = &dm816x_emac_hwmod_class,
855 .flags = HWMOD_NO_IDLEST,
853}; 856};
854 857
855static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { 858static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = {
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 1dfe34654c43..58144779dec4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -24,9 +24,6 @@
24#include <linux/platform_data/iommu-omap.h> 24#include <linux/platform_data/iommu-omap.h>
25#include <linux/platform_data/wkup_m3.h> 25#include <linux/platform_data/wkup_m3.h>
26 26
27#include <asm/siginfo.h>
28#include <asm/signal.h>
29
30#include "common.h" 27#include "common.h"
31#include "common-board-devices.h" 28#include "common-board-devices.h"
32#include "dss-common.h" 29#include "dss-common.h"
@@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void)
385} 382}
386#endif /* CONFIG_ARCH_OMAP3 */ 383#endif /* CONFIG_ARCH_OMAP3 */
387 384
388#ifdef CONFIG_SOC_TI81XX
389static int fault_fixed_up;
390
391static int t410_abort_handler(unsigned long addr, unsigned int fsr,
392 struct pt_regs *regs)
393{
394 if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) {
395 pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
396 addr, fsr);
397 fault_fixed_up = 1;
398 return 0;
399 }
400
401 return 1;
402}
403
404static void __init t410_abort_init(void)
405{
406 hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR,
407 "imprecise external abort");
408}
409#endif
410
411#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) 385#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
412static struct iommu_platform_data omap4_iommu_pdata = { 386static struct iommu_platform_data omap4_iommu_pdata = {
413 .reset_name = "mmu_cache", 387 .reset_name = "mmu_cache",
@@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = {
536 { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, }, 510 { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, },
537 { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, }, 511 { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, },
538#endif 512#endif
539#ifdef CONFIG_SOC_TI81XX
540 { "hp,t410", t410_abort_init, },
541#endif
542#ifdef CONFIG_SOC_OMAP5 513#ifdef CONFIG_SOC_OMAP5
543 { "ti,omap5-uevm", omap5_uevm_legacy_init, }, 514 { "ti,omap5-uevm", omap5_uevm_legacy_init, },
544#endif 515#endif
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 87b98bf92366..2dbd3785ee6f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -301,11 +301,11 @@ static void omap3_pm_idle(void)
301 if (omap_irq_pending()) 301 if (omap_irq_pending())
302 return; 302 return;
303 303
304 trace_cpu_idle(1, smp_processor_id()); 304 trace_cpu_idle_rcuidle(1, smp_processor_id());
305 305
306 omap_sram_idle(); 306 omap_sram_idle();
307 307
308 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 308 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
309} 309}
310 310
311#ifdef CONFIG_SUSPEND 311#ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-orion5x/include/mach/entry-macro.S b/arch/arm/mach-orion5x/include/mach/entry-macro.S
index 79eb502a1e64..73919a36b577 100644
--- a/arch/arm/mach-orion5x/include/mach/entry-macro.S
+++ b/arch/arm/mach-orion5x/include/mach/entry-macro.S
@@ -21,5 +21,5 @@
21 @ find cause bits that are unmasked 21 @ find cause bits that are unmasked
22 ands \irqstat, \irqstat, \tmp @ clear Z flag if any 22 ands \irqstat, \irqstat, \tmp @ clear Z flag if any
23 clzne \irqnr, \irqstat @ calc irqnr 23 clzne \irqnr, \irqstat @ calc irqnr
24 rsbne \irqnr, \irqnr, #31 24 rsbne \irqnr, \irqnr, #32
25 .endm 25 .endm
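The orion5x change adjusts the IRQ number computed from the unmasked cause word: clz on a word whose highest set bit is bit b returns 31 - b, so rsb #31 yielded b itself, while rsb #32 yields b + 1, presumably to match an IRQ numbering that now starts at 1. The arithmetic, checked in plain C:

/* For a cause word whose highest set bit is bit 5, clz() is 26,
 * so the old form gives 5 and the new form gives 6. */
#include <stdio.h>

int main(void)
{
        unsigned int irqstat = 1u << 5;
        int clz = __builtin_clz(irqstat);       /* 26 */

        printf("old: %d, new: %d\n", 31 - clz, 32 - clz);
        return 0;
}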
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 13eba2b26e0a..8fbfb10047ec 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd)
344{ 344{
345 palm_bl_power = bl; 345 palm_bl_power = bl;
346 palm_lcd_power = lcd; 346 palm_lcd_power = lcd;
347 pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup)); 347 pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
348 platform_device_register(&palm27x_backlight); 348 platform_device_register(&palm27x_backlight);
349} 349}
350#endif 350#endif
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index aebf6de62468..0b5c3876720c 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {}
169#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) 169#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
170static struct pwm_lookup palmtc_pwm_lookup[] = { 170static struct pwm_lookup palmtc_pwm_lookup[] = {
171 PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS, 171 PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS,
172 PWM_PERIOD_NORMAL), 172 PWM_POLARITY_NORMAL),
173}; 173};
174 174
175static struct platform_pwm_backlight_data palmtc_backlight_data = { 175static struct platform_pwm_backlight_data palmtc_backlight_data = {
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c
index 1d2825cb7a65..5fce87f7f254 100644
--- a/arch/arm/mach-shmobile/setup-r8a7793.c
+++ b/arch/arm/mach-shmobile/setup-r8a7793.c
@@ -19,7 +19,7 @@
19#include "common.h" 19#include "common.h"
20#include "rcar-gen2.h" 20#include "rcar-gen2.h"
21 21
22static const char *r8a7793_boards_compat_dt[] __initconst = { 22static const char * const r8a7793_boards_compat_dt[] __initconst = {
23 "renesas,r8a7793", 23 "renesas,r8a7793",
24 NULL, 24 NULL,
25}; 25};
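Making the r8a7793 compat array `const char * const` matters because __initconst places the data in a read-only, init-discarded section; with only `const char *`, the strings are const but the pointer array itself is still writable, which can provoke a section type conflict. A small standalone illustration of the difference:

/* Both declarations compile, but only the second makes the array
 * itself read-only, which a const section requires. */
#include <stdio.h>

static const char *boards[] = { "renesas,r8a7793", NULL };
static const char * const boards_const[] = { "renesas,r8a7793", NULL };

int main(void)
{
        boards[0] = "writable";          /* allowed: pointers are mutable */
        /* boards_const[0] = "nope"; */  /* error: array itself is const  */
        printf("%s %s\n", boards[0], boards_const[0]);
        return 0;
}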
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig
index 7fdc5bf24f9b..446334a25cf5 100644
--- a/arch/arm/mach-zx/Kconfig
+++ b/arch/arm/mach-zx/Kconfig
@@ -13,7 +13,7 @@ config SOC_ZX296702
13 select ARM_GLOBAL_TIMER 13 select ARM_GLOBAL_TIMER
14 select HAVE_ARM_SCU if SMP 14 select HAVE_ARM_SCU if SMP
15 select HAVE_ARM_TWD if SMP 15 select HAVE_ARM_TWD if SMP
16 select PM_GENERIC_DOMAINS 16 select PM_GENERIC_DOMAINS if PM
17 help 17 help
18 Support for ZTE ZX296702 SoC which is a dual core CortexA9MP 18 Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
19endif 19endif
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 2f4b14cfddb4..591f9db3bf40 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
1061 } 1061 }
1062 build_epilogue(&ctx); 1062 build_epilogue(&ctx);
1063 1063
1064 flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); 1064 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1065 1065
1066#if __LINUX_ARM_ARCH__ < 7 1066#if __LINUX_ARM_ARCH__ < 7
1067 if (ctx.imm_count) 1067 if (ctx.imm_count)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..871f21783866 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -49,7 +49,7 @@ config ARM64
49 select HAVE_ARCH_AUDITSYSCALL 49 select HAVE_ARCH_AUDITSYSCALL
50 select HAVE_ARCH_BITREVERSE 50 select HAVE_ARCH_BITREVERSE
51 select HAVE_ARCH_JUMP_LABEL 51 select HAVE_ARCH_JUMP_LABEL
52 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP 52 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
53 select HAVE_ARCH_KGDB 53 select HAVE_ARCH_KGDB
54 select HAVE_ARCH_SECCOMP_FILTER 54 select HAVE_ARCH_SECCOMP_FILTER
55 select HAVE_ARCH_TRACEHOOK 55 select HAVE_ARCH_TRACEHOOK
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
316 316
317 If unsure, say Y. 317 If unsure, say Y.
318 318
319config ARM64_ERRATUM_834220
320 bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
321 depends on KVM
322 default y
323 help
324 This option adds an alternative code sequence to work around ARM
325 erratum 834220 on Cortex-A57 parts up to r1p2.
326
327 Affected Cortex-A57 parts might report a Stage 2 translation
 328	  fault as the result of a Stage 1 fault for a load crossing a

329 page boundary when there is a permission or device memory
330 alignment fault at Stage 1 and a translation fault at Stage 2.
331
332 The workaround is to verify that the Stage 1 translation
333 doesn't generate a fault before handling the Stage 2 fault.
334 Please note that this does not necessarily enable the workaround,
335 as it depends on the alternative framework, which will only patch
336 the kernel if an affected CPU is detected.
337
338 If unsure, say Y.
339
319config ARM64_ERRATUM_845719 340config ARM64_ERRATUM_845719
320 bool "Cortex-A53: 845719: a load might read incorrect data" 341 bool "Cortex-A53: 845719: a load might read incorrect data"
321 depends on COMPAT 342 depends on COMPAT
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983d..f7bd9bf0bbb3 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
237static struct crypto_alg aes_alg = { 237static struct crypto_alg aes_alg = {
238 .cra_name = "aes", 238 .cra_name = "aes",
239 .cra_driver_name = "aes-ce", 239 .cra_driver_name = "aes-ce",
240 .cra_priority = 300, 240 .cra_priority = 250,
241 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 241 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
242 .cra_blocksize = AES_BLOCK_SIZE, 242 .cra_blocksize = AES_BLOCK_SIZE,
243 .cra_ctxsize = sizeof(struct crypto_aes_ctx), 243 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do { \
64 64
65#define smp_load_acquire(p) \ 65#define smp_load_acquire(p) \
66({ \ 66({ \
67 typeof(*p) ___p1; \ 67 union { typeof(*p) __val; char __c[1]; } __u; \
68 compiletime_assert_atomic_type(*p); \ 68 compiletime_assert_atomic_type(*p); \
69 switch (sizeof(*p)) { \ 69 switch (sizeof(*p)) { \
70 case 1: \ 70 case 1: \
71 asm volatile ("ldarb %w0, %1" \ 71 asm volatile ("ldarb %w0, %1" \
72 : "=r" (___p1) : "Q" (*p) : "memory"); \ 72 : "=r" (*(__u8 *)__u.__c) \
73 : "Q" (*p) : "memory"); \
73 break; \ 74 break; \
74 case 2: \ 75 case 2: \
75 asm volatile ("ldarh %w0, %1" \ 76 asm volatile ("ldarh %w0, %1" \
76 : "=r" (___p1) : "Q" (*p) : "memory"); \ 77 : "=r" (*(__u16 *)__u.__c) \
78 : "Q" (*p) : "memory"); \
77 break; \ 79 break; \
78 case 4: \ 80 case 4: \
79 asm volatile ("ldar %w0, %1" \ 81 asm volatile ("ldar %w0, %1" \
80 : "=r" (___p1) : "Q" (*p) : "memory"); \ 82 : "=r" (*(__u32 *)__u.__c) \
83 : "Q" (*p) : "memory"); \
81 break; \ 84 break; \
82 case 8: \ 85 case 8: \
83 asm volatile ("ldar %0, %1" \ 86 asm volatile ("ldar %0, %1" \
84 : "=r" (___p1) : "Q" (*p) : "memory"); \ 87 : "=r" (*(__u64 *)__u.__c) \
88 : "Q" (*p) : "memory"); \
85 break; \ 89 break; \
86 } \ 90 } \
87 ___p1; \ 91 __u.__val; \
88}) 92})
89 93
90#define read_barrier_depends() do { } while(0) 94#define read_barrier_depends() do { } while(0)
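The smp_load_acquire() rewrite replaces the `typeof(*p)` local, previously used directly as an asm output, with a union of the value and a char array: the asm now stores through a plain integer lvalue (`*(__u8 *)__u.__c` and friends) while the macro still evaluates to `typeof(*p)`. One effect is that the macro keeps working when `*p` is const-qualified, since a const local cannot be used as an asm output. A userspace model of the union trick, with memcpy standing in for the ldar instruction:

/* Sketch only: memcpy models the acquire load; the union gives a
 * writable byte view of a possibly const-typed value. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define load_once(p) ({						\
	union { __typeof__(*(p)) __val; char __c[1]; } __u;	\
	memcpy(__u.__c, (const void *)(p), sizeof(*(p)));	\
	__u.__val;						\
})

int main(void)
{
        const uint32_t x = 42;          /* const pointee is fine here */
        uint32_t y = load_once(&x);

        printf("%u\n", y);
        return 0;
}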
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
23 */ 23 */
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/ptrace.h>
27 26
28#define COMPAT_USER_HZ 100 27#define COMPAT_USER_HZ 100
29#ifdef __AARCH64EB__ 28#ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
234 return (u32)(unsigned long)uptr; 233 return (u32)(unsigned long)uptr;
235} 234}
236 235
237#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) 236#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
238 237
239static inline void __user *arch_compat_alloc_user_space(long len) 238static inline void __user *arch_compat_alloc_user_space(long len)
240{ 239{
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..8f271b83f910 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
29#define ARM64_HAS_PAN 4 29#define ARM64_HAS_PAN 4
30#define ARM64_HAS_LSE_ATOMICS 5 30#define ARM64_HAS_LSE_ATOMICS 5
31#define ARM64_WORKAROUND_CAVIUM_23154 6 31#define ARM64_WORKAROUND_CAVIUM_23154 6
32#define ARM64_WORKAROUND_834220 7
32 33
33#define ARM64_NCAPS 7 34#define ARM64_NCAPS 8
34 35
35#ifndef __ASSEMBLY__ 36#ifndef __ASSEMBLY__
36 37
@@ -46,8 +47,12 @@ enum ftr_type {
46#define FTR_STRICT true /* SANITY check strict matching required */ 47#define FTR_STRICT true /* SANITY check strict matching required */
47#define FTR_NONSTRICT false /* SANITY check ignored */ 48#define FTR_NONSTRICT false /* SANITY check ignored */
48 49
50#define FTR_SIGNED true /* Value should be treated as signed */
51#define FTR_UNSIGNED false /* Value should be treated as unsigned */
52
49struct arm64_ftr_bits { 53struct arm64_ftr_bits {
50 bool strict; /* CPU Sanity check: strict matching required ? */ 54 bool sign; /* Value is signed ? */
55 bool strict; /* CPU Sanity check: strict matching required ? */
51 enum ftr_type type; 56 enum ftr_type type;
52 u8 shift; 57 u8 shift;
53 u8 width; 58 u8 width;
@@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
123 return cpuid_feature_extract_field_width(features, field, 4); 128 return cpuid_feature_extract_field_width(features, field, 4);
124} 129}
125 130
131static inline unsigned int __attribute_const__
132cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
133{
134 return (u64)(features << (64 - width - field)) >> (64 - width);
135}
136
137static inline unsigned int __attribute_const__
138cpuid_feature_extract_unsigned_field(u64 features, int field)
139{
140 return cpuid_feature_extract_unsigned_field_width(features, field, 4);
141}
142
126static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) 143static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
127{ 144{
128 return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); 145 return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
130 147
131static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) 148static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
132{ 149{
133 return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width); 150 return ftrp->sign ?
151 cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
152 cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
134} 153}
135 154
136static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) 155static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
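The new sign field matters because ID register fields such as BRPS and WRPS are unsigned: extracting them with the sign-extending helper turns a field value of 0xF into -1 instead of 15, and get_num_brps()/get_num_wrps() (see the hw_breakpoint.h hunk below) would then report 0 rather than 16 registers. A runnable comparison of the two extractors, mirroring the field-width arithmetic above:

/* Extract a 4-bit field of 0xF at shift 12 both ways. */
#include <stdint.h>
#include <stdio.h>

static int64_t extract_signed(uint64_t features, int field, int width)
{
        return (int64_t)(features << (64 - width - field)) >> (64 - width);
}

static uint64_t extract_unsigned(uint64_t features, int field, int width)
{
        return (features << (64 - width - field)) >> (64 - width);
}

int main(void)
{
        uint64_t reg = 0xFull << 12;

        /* prints "signed: -1, unsigned: 15" */
        printf("signed: %lld, unsigned: %llu\n",
               (long long)extract_signed(reg, 12, 4),
               (unsigned long long)extract_unsigned(reg, 12, 4));
        return 0;
}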
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
18 18
19#ifdef __KERNEL__ 19#ifdef __KERNEL__
20 20
21#include <linux/acpi.h>
22#include <linux/types.h> 21#include <linux/types.h>
23#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
24 23
@@ -26,22 +25,16 @@
26#include <asm/xen/hypervisor.h> 25#include <asm/xen/hypervisor.h>
27 26
28#define DMA_ERROR_CODE (~(dma_addr_t)0) 27#define DMA_ERROR_CODE (~(dma_addr_t)0)
29extern struct dma_map_ops *dma_ops;
30extern struct dma_map_ops dummy_dma_ops; 28extern struct dma_map_ops dummy_dma_ops;
31 29
32static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) 30static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
33{ 31{
34 if (unlikely(!dev)) 32 if (dev && dev->archdata.dma_ops)
35 return dma_ops;
36 else if (dev->archdata.dma_ops)
37 return dev->archdata.dma_ops; 33 return dev->archdata.dma_ops;
38 else if (acpi_disabled)
39 return dma_ops;
40 34
41 /* 35 /*
42 * When ACPI is enabled, if arch_set_dma_ops is not called, 36 * We expect no ISA devices, and all other DMA masters are expected to
43 * we will disable device DMA capability by setting it 37 * have someone call arch_setup_dma_ops at device creation time.
44 * to dummy_dma_ops.
45 */ 38 */
46 return &dummy_dma_ops; 39 return &dummy_dma_ops;
47} 40}
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index e54415ec6935..9732908bfc8a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
138/* Determine number of BRP registers available. */ 138/* Determine number of BRP registers available. */
139static inline int get_num_brps(void) 139static inline int get_num_brps(void)
140{ 140{
141 u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
141 return 1 + 142 return 1 +
142 cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), 143 cpuid_feature_extract_unsigned_field(dfr0,
143 ID_AA64DFR0_BRPS_SHIFT); 144 ID_AA64DFR0_BRPS_SHIFT);
144} 145}
145 146
146/* Determine number of WRP registers available. */ 147/* Determine number of WRP registers available. */
147static inline int get_num_wrps(void) 148static inline int get_num_wrps(void)
148{ 149{
150 u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
149 return 1 + 151 return 1 +
150 cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), 152 cpuid_feature_extract_unsigned_field(dfr0,
151 ID_AA64DFR0_WRPS_SHIFT); 153 ID_AA64DFR0_WRPS_SHIFT);
152} 154}
153 155
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 23eb450b820b..8e8d30684392 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,4 +7,9 @@ struct pt_regs;
7 7
8extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); 8extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
9 9
10static inline int nr_legacy_irqs(void)
11{
12 return 0;
13}
14
10#endif 15#endif
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..25a40213bd9b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,12 +99,22 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
99 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; 99 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
100} 100}
101 101
102static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) 102/*
103 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
104 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
105 * AArch32 with banked registers.
106 */
107static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
108 u8 reg_num)
103{ 109{
104 if (vcpu_mode_is_32bit(vcpu)) 110 return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
105 return vcpu_reg32(vcpu, reg_num); 111}
106 112
107 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; 113static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
114 unsigned long val)
115{
116 if (reg_num != 31)
117 vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
108} 118}
109 119
110/* Get vcpu SPSR for current mode */ 120/* Get vcpu SPSR for current mode */
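Replacing vcpu_reg(), which returned a pointer into the GP register file, with a get/set pair lets the code express the AArch64 rule that register number 31 in these encodings is XZR: reads return zero and writes are discarded, something a plain pointer cannot model. A minimal model of those semantics with an invented register file:

/* x0..x30 backing store; number 31 encodes the zero register. */
#include <stdio.h>

static unsigned long regs[31];

static unsigned long get_reg(int n)
{
        return (n == 31) ? 0 : regs[n];
}

static void set_reg(int n, unsigned long val)
{
        if (n != 31)
                regs[n] = val;
}

int main(void)
{
        set_reg(31, 0xdead);                            /* silently discarded */
        printf("x31 reads as %lu\n", get_reg(31));      /* always 0 */
        return 0;
}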
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
101#define destroy_context(mm) do { } while(0) 101#define destroy_context(mm) do { } while(0)
102void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); 102void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
103 103
104#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) 104#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
105 105
106/* 106/*
107 * This is called when "tsk" is about to enter lazy TLB mode. 107 * This is called when "tsk" is about to enter lazy TLB mode.
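The init_new_context() change is a one-character macro-hygiene fix: without parentheses around mm, an argument containing an operator would associate wrongly, since unary & and -> bind tighter than most binary operators. A compile-time demonstration with invented types:

/* CTX_ID_BAD(p + 1) expands to &p + 1->context.id and does not
 * compile; the parenthesized form behaves as intended. */
#include <stdio.h>

struct ctx { long id; };
struct mm { struct ctx context; };

#define CTX_ID_BAD(mm)  (&mm->context.id)
#define CTX_ID_GOOD(mm) (&(mm)->context.id)

int main(void)
{
        struct mm mms[2];
        struct mm *p = mms;

        printf("%p\n", (void *)CTX_ID_GOOD(p + 1));
        return 0;
}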
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..7e074f93f383 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
81 81
82#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) 82#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
83#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) 83#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
84#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
84#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) 85#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
85#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) 86#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
86 87
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
75 (1 << MIDR_VARIANT_SHIFT) | 2), 75 (1 << MIDR_VARIANT_SHIFT) | 2),
76 }, 76 },
77#endif 77#endif
78#ifdef CONFIG_ARM64_ERRATUM_834220
79 {
80 /* Cortex-A57 r0p0 - r1p2 */
81 .desc = "ARM erratum 834220",
82 .capability = ARM64_WORKAROUND_834220,
83 MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
84 (1 << MIDR_VARIANT_SHIFT) | 2),
85 },
86#endif
78#ifdef CONFIG_ARM64_ERRATUM_845719 87#ifdef CONFIG_ARM64_ERRATUM_845719
79 { 88 {
80 /* Cortex-A53 r0p[01234] */ 89 /* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c8cf89223b5a..0669c63281ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly;
44 44
45DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 45DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
46 46
47#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ 47#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
48 { \ 48 { \
49 .sign = SIGNED, \
49 .strict = STRICT, \ 50 .strict = STRICT, \
50 .type = TYPE, \ 51 .type = TYPE, \
51 .shift = SHIFT, \ 52 .shift = SHIFT, \
@@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
53 .safe_val = SAFE_VAL, \ 54 .safe_val = SAFE_VAL, \
54 } 55 }
55 56
57/* Define a feature with signed values */
58#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
59 __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
60
61/* Define a feature with unsigned value */
62#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
63 __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
64
56#define ARM64_FTR_END \ 65#define ARM64_FTR_END \
57 { \ 66 { \
58 .width = 0, \ 67 .width = 0, \
@@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
99 * Differing PARange is fine as long as all peripherals and memory are mapped 108 * Differing PARange is fine as long as all peripherals and memory are mapped
100 * within the minimum PARange of all CPUs 109 * within the minimum PARange of all CPUs
101 */ 110 */
102 ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), 111 U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
103 ARM64_FTR_END, 112 ARM64_FTR_END,
104}; 113};
105 114
@@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
115}; 124};
116 125
117static struct arm64_ftr_bits ftr_ctr[] = { 126static struct arm64_ftr_bits ftr_ctr[] = {
118 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ 127 U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
119 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), 128 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
120 ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ 129 U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
121 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ 130 U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
122 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ 131 U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
123 /* 132 /*
124 * Linux can handle differing I-cache policies. Userspace JITs will 133 * Linux can handle differing I-cache policies. Userspace JITs will
125 * make use of *minLine 134 * make use of *minLine
126 */ 135 */
127 ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ 136 U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */
128 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ 137 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */
129 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ 138 U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
130 ARM64_FTR_END, 139 ARM64_FTR_END,
131}; 140};
132 141
@@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
144 153
145static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { 154static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
146 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), 155 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
147 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), 156 U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
148 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), 157 U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
149 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), 158 U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
150 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), 159 U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
151 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), 160 U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
152 ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), 161 U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
153 ARM64_FTR_END, 162 ARM64_FTR_END,
154}; 163};
155 164
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4..212ae6361d8b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <linux/smp.h> 32#include <linux/smp.h>
33#include <linux/delay.h>
33 34
34/* 35/*
35 * In case the boot CPU is hotpluggable, we record its initial state and 36 * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
112 */ 113 */
113 seq_printf(m, "processor\t: %d\n", i); 114 seq_printf(m, "processor\t: %d\n", i);
114 115
116 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
117 loops_per_jiffy / (500000UL/HZ),
118 loops_per_jiffy / (5000UL/HZ) % 100);
119
115 /* 120 /*
116 * Dump out the common processor features in a single line. 121 * Dump out the common processor features in a single line.
117 * Userspace should read the hwcaps with getauxval(AT_HWCAP) 122 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
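The restored BogoMIPS line is straight scaling of loops_per_jiffy: lpj loops per jiffy times HZ jiffies per second, divided by 500000, gives the integer part, and the second expression extracts two decimal places. The same arithmetic, runnable with an invented lpj value:

/* lpj = 4980736 at HZ = 100 prints "BogoMIPS: 996.14". */
#include <stdio.h>

#define HZ 100

int main(void)
{
        unsigned long lpj = 4980736UL;

        printf("BogoMIPS: %lu.%02lu\n",
               lpj / (500000UL / HZ),
               lpj / (5000UL / HZ) % 100);
        return 0;
}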
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf..4eeb17198cfa 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -127,7 +127,11 @@ static int __init uefi_init(void)
127 table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; 127 table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
128 config_tables = early_memremap(efi_to_phys(efi.systab->tables), 128 config_tables = early_memremap(efi_to_phys(efi.systab->tables),
129 table_size); 129 table_size);
130 130 if (config_tables == NULL) {
131 pr_warn("Unable to map EFI config table array.\n");
132 retval = -ENOMEM;
133 goto out;
134 }
131 retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, 135 retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
132 sizeof(efi_config_table_64_t), NULL); 136 sizeof(efi_config_table_64_t), NULL);
133 137
@@ -209,6 +213,14 @@ void __init efi_init(void)
209 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); 213 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
210 memmap.phys_map = params.mmap; 214 memmap.phys_map = params.mmap;
211 memmap.map = early_memremap(params.mmap, params.mmap_size); 215 memmap.map = early_memremap(params.mmap, params.mmap_size);
216 if (memmap.map == NULL) {
217 /*
218 * If we are booting via UEFI, the UEFI memory map is the only
219 * description of memory we have, so there is little point in
220 * proceeding if we cannot access it.
221 */
222 panic("Unable to map EFI memory map.\n");
223 }
212 memmap.map_end = memmap.map + params.mmap_size; 224 memmap.map_end = memmap.map + params.mmap_size;
213 memmap.desc_size = params.desc_size; 225 memmap.desc_size = params.desc_size;
214 memmap.desc_version = params.desc_ver; 226 memmap.desc_version = params.desc_ver;
@@ -224,8 +236,9 @@ static bool __init efi_virtmap_init(void)
224{ 236{
225 efi_memory_desc_t *md; 237 efi_memory_desc_t *md;
226 238
239 init_new_context(NULL, &efi_mm);
240
227 for_each_efi_memory_desc(&memmap, md) { 241 for_each_efi_memory_desc(&memmap, md) {
228 u64 paddr, npages, size;
229 pgprot_t prot; 242 pgprot_t prot;
230 243
231 if (!(md->attribute & EFI_MEMORY_RUNTIME)) 244 if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -233,11 +246,6 @@ static bool __init efi_virtmap_init(void)
233 if (md->virt_addr == 0) 246 if (md->virt_addr == 0)
234 return false; 247 return false;
235 248
236 paddr = md->phys_addr;
237 npages = md->num_pages;
238 memrange_efi_to_native(&paddr, &npages);
239 size = npages << PAGE_SHIFT;
240
241 pr_info(" EFI remap 0x%016llx => %p\n", 249 pr_info(" EFI remap 0x%016llx => %p\n",
242 md->phys_addr, (void *)md->virt_addr); 250 md->phys_addr, (void *)md->virt_addr);
243 251
@@ -254,7 +262,9 @@ static bool __init efi_virtmap_init(void)
254 else 262 else
255 prot = PAGE_KERNEL; 263 prot = PAGE_KERNEL;
256 264
257 create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); 265 create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
266 md->num_pages << EFI_PAGE_SHIFT,
267 __pgprot(pgprot_val(prot) | PTE_NG));
258 } 268 }
259 return true; 269 return true;
260} 270}
@@ -270,12 +280,12 @@ static int __init arm64_enable_runtime_services(void)
270 280
271 if (!efi_enabled(EFI_BOOT)) { 281 if (!efi_enabled(EFI_BOOT)) {
272 pr_info("EFI services will not be available.\n"); 282 pr_info("EFI services will not be available.\n");
273 return -1; 283 return 0;
274 } 284 }
275 285
276 if (efi_runtime_disabled()) { 286 if (efi_runtime_disabled()) {
277 pr_info("EFI runtime services will be disabled.\n"); 287 pr_info("EFI runtime services will be disabled.\n");
278 return -1; 288 return 0;
279 } 289 }
280 290
281 pr_info("Remapping and enabling EFI services.\n"); 291 pr_info("Remapping and enabling EFI services.\n");
@@ -285,7 +295,7 @@ static int __init arm64_enable_runtime_services(void)
285 mapsize); 295 mapsize);
286 if (!memmap.map) { 296 if (!memmap.map) {
287 pr_err("Failed to remap EFI memory map\n"); 297 pr_err("Failed to remap EFI memory map\n");
288 return -1; 298 return -ENOMEM;
289 } 299 }
290 memmap.map_end = memmap.map + mapsize; 300 memmap.map_end = memmap.map + mapsize;
291 efi.memmap = &memmap; 301 efi.memmap = &memmap;
@@ -294,13 +304,13 @@ static int __init arm64_enable_runtime_services(void)
294 sizeof(efi_system_table_t)); 304 sizeof(efi_system_table_t));
295 if (!efi.systab) { 305 if (!efi.systab) {
296 pr_err("Failed to remap EFI System Table\n"); 306 pr_err("Failed to remap EFI System Table\n");
297 return -1; 307 return -ENOMEM;
298 } 308 }
299 set_bit(EFI_SYSTEM_TABLES, &efi.flags); 309 set_bit(EFI_SYSTEM_TABLES, &efi.flags);
300 310
301 if (!efi_virtmap_init()) { 311 if (!efi_virtmap_init()) {
302 pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); 312 pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
303 return -1; 313 return -ENOMEM;
304 } 314 }
305 315
306 /* Set up runtime services function pointers */ 316 /* Set up runtime services function pointers */
@@ -329,14 +339,7 @@ core_initcall(arm64_dmi_init);
329 339
330static void efi_set_pgd(struct mm_struct *mm) 340static void efi_set_pgd(struct mm_struct *mm)
331{ 341{
332 if (mm == &init_mm) 342 switch_mm(NULL, mm, NULL);
333 cpu_set_reserved_ttbr0();
334 else
335 cpu_switch_mm(mm->pgd, mm);
336
337 local_flush_tlb_all();
338 if (icache_is_aivivt())
339 __local_flush_icache_all();
340} 343}
341 344
342void efi_virtmap_load(void) 345void efi_virtmap_load(void)
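The efi.c return-value changes follow initcall conventions: a negative errno from an initcall flags a genuine failure, whereas "EFI not used on this boot" is a normal outcome and now returns 0, while the remap failures that do matter return -ENOMEM. The new early_memremap() checks and the switch_mm()-based efi_set_pgd() are part of the same hardening. A sketch of that convention (the scaffolding below is simplified userspace code, not the kernel's initcall machinery):

/* Distinguish "nothing to do" (0) from a real failure (-ENOMEM). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool efi_booted = false;
static bool remap_failed = false;

static int enable_runtime_services(void)
{
        if (!efi_booted) {
                printf("EFI services will not be available.\n");
                return 0;               /* not an error: nothing to do */
        }
        if (remap_failed)
                return -ENOMEM;         /* a real failure */
        return 0;
}

int main(void)
{
        int ret = enable_runtime_services();

        printf(ret < 0 ? "initcall failed: %d\n" : "initcall ok: %d\n", ret);
        return 0;
}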
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
1#include <linux/ftrace.h>
1#include <linux/percpu.h> 2#include <linux/percpu.h>
2#include <linux/slab.h> 3#include <linux/slab.h>
3#include <asm/cacheflush.h> 4#include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
71 local_dbg_save(flags); 72 local_dbg_save(flags);
72 73
73 /* 74 /*
 75	 * Function graph tracer state gets inconsistent when the kernel
 76	 * calls functions that never return (aka suspend finishers), hence
77 * disable graph tracing during their execution.
78 */
79 pause_graph_tracing();
80
81 /*
74 * mm context saved on the stack, it will be restored when 82 * mm context saved on the stack, it will be restored when
75 * the cpu comes out of reset through the identity mapped 83 * the cpu comes out of reset through the identity mapped
76 * page tables, so that the thread address space is properly 84 * page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
111 hw_breakpoint_restore(NULL); 119 hw_breakpoint_restore(NULL);
112 } 120 }
113 121
122 unpause_graph_tracing();
123
114 /* 124 /*
115 * Restore pstate flags. OS lock and mdscr have been already 125 * Restore pstate flags. OS lock and mdscr have been already
116 * restored, so from this point onwards, debugging is fully 126 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 68a0759b1375..15f0477b0d2a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
37{ 37{
38 int ret; 38 int ret;
39 39
40 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), 40 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
41 kvm_vcpu_hvc_get_imm(vcpu)); 41 kvm_vcpu_hvc_get_imm(vcpu));
42 42
43 ret = kvm_psci_call(vcpu); 43 ret = kvm_psci_call(vcpu);
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
864ENDPROC(__kvm_flush_vm_context) 864ENDPROC(__kvm_flush_vm_context)
865 865
866__kvm_hyp_panic: 866__kvm_hyp_panic:
867 // Stash PAR_EL1 before corrupting it in __restore_sysregs
868 mrs x0, par_el1
869 push x0, xzr
870
867 // Guess the context by looking at VTTBR: 871 // Guess the context by looking at VTTBR:
868 // If zero, then we're already a host. 872 // If zero, then we're already a host.
 869	// Otherwise restore a minimal host context before panicking. 873	// Otherwise restore a minimal host context before panicking.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
898 mrs x3, esr_el2 902 mrs x3, esr_el2
899 mrs x4, far_el2 903 mrs x4, far_el2
900 mrs x5, hpfar_el2 904 mrs x5, hpfar_el2
901 mrs x6, par_el1 905 pop x6, xzr // active context PAR_EL1
902 mrs x7, tpidr_el2 906 mrs x7, tpidr_el2
903 907
904 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ 908 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
914ENDPROC(__kvm_hyp_panic) 918ENDPROC(__kvm_hyp_panic)
915 919
916__hyp_panic_str: 920__hyp_panic_str:
917 .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" 921 .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
918 922
919 .align 2 923 .align 2
920 924
@@ -1015,9 +1019,15 @@ el1_trap:
1015 b.ne 1f // Not an abort we care about 1019 b.ne 1f // Not an abort we care about
1016 1020
1017 /* This is an abort. Check for permission fault */ 1021 /* This is an abort. Check for permission fault */
1022alternative_if_not ARM64_WORKAROUND_834220
1018 and x2, x1, #ESR_ELx_FSC_TYPE 1023 and x2, x1, #ESR_ELx_FSC_TYPE
1019 cmp x2, #FSC_PERM 1024 cmp x2, #FSC_PERM
1020 b.ne 1f // Not a permission fault 1025 b.ne 1f // Not a permission fault
1026alternative_else
1027 nop // Use the permission fault path to
1028 nop // check for a valid S1 translation,
1029 nop // regardless of the ESR value.
1030alternative_endif
1021 1031
1022 /* 1032 /*
1023 * Check for Stage-1 page table walk, which is guaranteed 1033 * Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
48 48
49 /* Note: These now point to the banked copies */ 49 /* Note: These now point to the banked copies */
50 *vcpu_spsr(vcpu) = new_spsr_value; 50 *vcpu_spsr(vcpu) = new_spsr_value;
51 *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; 51 *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
52 52
53 /* Branch to exception vector */ 53 /* Branch to exception vector */
54 if (sctlr & (1 << 13)) 54 if (sctlr & (1 << 13))
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87a64e8db04c..d2650e84faf2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
78 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 78 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
79 */ 79 */
80static bool access_dcsw(struct kvm_vcpu *vcpu, 80static bool access_dcsw(struct kvm_vcpu *vcpu,
81 const struct sys_reg_params *p, 81 struct sys_reg_params *p,
82 const struct sys_reg_desc *r) 82 const struct sys_reg_desc *r)
83{ 83{
84 if (!p->is_write) 84 if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
94 * sys_regs and leave it in complete control of the caches. 94 * sys_regs and leave it in complete control of the caches.
95 */ 95 */
96static bool access_vm_reg(struct kvm_vcpu *vcpu, 96static bool access_vm_reg(struct kvm_vcpu *vcpu,
97 const struct sys_reg_params *p, 97 struct sys_reg_params *p,
98 const struct sys_reg_desc *r) 98 const struct sys_reg_desc *r)
99{ 99{
100 unsigned long val;
101 bool was_enabled = vcpu_has_cache_enabled(vcpu); 100 bool was_enabled = vcpu_has_cache_enabled(vcpu);
102 101
103 BUG_ON(!p->is_write); 102 BUG_ON(!p->is_write);
104 103
105 val = *vcpu_reg(vcpu, p->Rt);
106 if (!p->is_aarch32) { 104 if (!p->is_aarch32) {
107 vcpu_sys_reg(vcpu, r->reg) = val; 105 vcpu_sys_reg(vcpu, r->reg) = p->regval;
108 } else { 106 } else {
109 if (!p->is_32bit) 107 if (!p->is_32bit)
110 vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; 108 vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
111 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; 109 vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
112 } 110 }
113 111
114 kvm_toggle_cache(vcpu, was_enabled); 112 kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
122 * for both AArch64 and AArch32 accesses. 120 * for both AArch64 and AArch32 accesses.
123 */ 121 */
124static bool access_gic_sgi(struct kvm_vcpu *vcpu, 122static bool access_gic_sgi(struct kvm_vcpu *vcpu,
125 const struct sys_reg_params *p, 123 struct sys_reg_params *p,
126 const struct sys_reg_desc *r) 124 const struct sys_reg_desc *r)
127{ 125{
128 u64 val;
129
130 if (!p->is_write) 126 if (!p->is_write)
131 return read_from_write_only(vcpu, p); 127 return read_from_write_only(vcpu, p);
132 128
133 val = *vcpu_reg(vcpu, p->Rt); 129 vgic_v3_dispatch_sgi(vcpu, p->regval);
134 vgic_v3_dispatch_sgi(vcpu, val);
135 130
136 return true; 131 return true;
137} 132}
138 133
139static bool trap_raz_wi(struct kvm_vcpu *vcpu, 134static bool trap_raz_wi(struct kvm_vcpu *vcpu,
140 const struct sys_reg_params *p, 135 struct sys_reg_params *p,
141 const struct sys_reg_desc *r) 136 const struct sys_reg_desc *r)
142{ 137{
143 if (p->is_write) 138 if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
147} 142}
148 143
149static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 144static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
150 const struct sys_reg_params *p, 145 struct sys_reg_params *p,
151 const struct sys_reg_desc *r) 146 const struct sys_reg_desc *r)
152{ 147{
153 if (p->is_write) { 148 if (p->is_write) {
154 return ignore_write(vcpu, p); 149 return ignore_write(vcpu, p);
155 } else { 150 } else {
156 *vcpu_reg(vcpu, p->Rt) = (1 << 3); 151 p->regval = (1 << 3);
157 return true; 152 return true;
158 } 153 }
159} 154}
160 155
161static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, 156static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
162 const struct sys_reg_params *p, 157 struct sys_reg_params *p,
163 const struct sys_reg_desc *r) 158 const struct sys_reg_desc *r)
164{ 159{
165 if (p->is_write) { 160 if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
167 } else { 162 } else {
168 u32 val; 163 u32 val;
169 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); 164 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
170 *vcpu_reg(vcpu, p->Rt) = val; 165 p->regval = val;
171 return true; 166 return true;
172 } 167 }
173} 168}
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
200 * now use the debug registers. 195 * now use the debug registers.
201 */ 196 */
202static bool trap_debug_regs(struct kvm_vcpu *vcpu, 197static bool trap_debug_regs(struct kvm_vcpu *vcpu,
203 const struct sys_reg_params *p, 198 struct sys_reg_params *p,
204 const struct sys_reg_desc *r) 199 const struct sys_reg_desc *r)
205{ 200{
206 if (p->is_write) { 201 if (p->is_write) {
207 vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); 202 vcpu_sys_reg(vcpu, r->reg) = p->regval;
208 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 203 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
209 } else { 204 } else {
210 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); 205 p->regval = vcpu_sys_reg(vcpu, r->reg);
211 } 206 }
212 207
213 trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt)); 208 trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
214 209
215 return true; 210 return true;
216} 211}
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
225 * hyp.S code switches between host and guest values in future. 220 * hyp.S code switches between host and guest values in future.
226 */ 221 */
227static inline void reg_to_dbg(struct kvm_vcpu *vcpu, 222static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
228 const struct sys_reg_params *p, 223 struct sys_reg_params *p,
229 u64 *dbg_reg) 224 u64 *dbg_reg)
230{ 225{
231 u64 val = *vcpu_reg(vcpu, p->Rt); 226 u64 val = p->regval;
232 227
233 if (p->is_32bit) { 228 if (p->is_32bit) {
234 val &= 0xffffffffUL; 229 val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
240} 235}
241 236
242static inline void dbg_to_reg(struct kvm_vcpu *vcpu, 237static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
243 const struct sys_reg_params *p, 238 struct sys_reg_params *p,
244 u64 *dbg_reg) 239 u64 *dbg_reg)
245{ 240{
246 u64 val = *dbg_reg; 241 p->regval = *dbg_reg;
247
248 if (p->is_32bit) 242 if (p->is_32bit)
249 val &= 0xffffffffUL; 243 p->regval &= 0xffffffffUL;
250
251 *vcpu_reg(vcpu, p->Rt) = val;
252} 244}
253 245
254static inline bool trap_bvr(struct kvm_vcpu *vcpu, 246static inline bool trap_bvr(struct kvm_vcpu *vcpu,
255 const struct sys_reg_params *p, 247 struct sys_reg_params *p,
256 const struct sys_reg_desc *rd) 248 const struct sys_reg_desc *rd)
257{ 249{
258 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 250 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
294} 286}
295 287
296static inline bool trap_bcr(struct kvm_vcpu *vcpu, 288static inline bool trap_bcr(struct kvm_vcpu *vcpu,
297 const struct sys_reg_params *p, 289 struct sys_reg_params *p,
298 const struct sys_reg_desc *rd) 290 const struct sys_reg_desc *rd)
299{ 291{
300 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; 292 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
337} 329}
338 330
339static inline bool trap_wvr(struct kvm_vcpu *vcpu, 331static inline bool trap_wvr(struct kvm_vcpu *vcpu,
340 const struct sys_reg_params *p, 332 struct sys_reg_params *p,
341 const struct sys_reg_desc *rd) 333 const struct sys_reg_desc *rd)
342{ 334{
343 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; 335 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
380} 372}
381 373
382static inline bool trap_wcr(struct kvm_vcpu *vcpu, 374static inline bool trap_wcr(struct kvm_vcpu *vcpu,
383 const struct sys_reg_params *p, 375 struct sys_reg_params *p,
384 const struct sys_reg_desc *rd) 376 const struct sys_reg_desc *rd)
385{ 377{
386 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; 378 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
687}; 679};
688 680
689static bool trap_dbgidr(struct kvm_vcpu *vcpu, 681static bool trap_dbgidr(struct kvm_vcpu *vcpu,
690 const struct sys_reg_params *p, 682 struct sys_reg_params *p,
691 const struct sys_reg_desc *r) 683 const struct sys_reg_desc *r)
692{ 684{
693 if (p->is_write) { 685 if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
697 u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); 689 u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
698 u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); 690 u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
699 691
700 *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | 692 p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
701 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | 693 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
702 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | 694 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
703 (6 << 16) | (el3 << 14) | (el3 << 12)); 695 | (6 << 16) | (el3 << 14) | (el3 << 12));
704 return true; 696 return true;
705 } 697 }
706} 698}
707 699
708static bool trap_debug32(struct kvm_vcpu *vcpu, 700static bool trap_debug32(struct kvm_vcpu *vcpu,
709 const struct sys_reg_params *p, 701 struct sys_reg_params *p,
710 const struct sys_reg_desc *r) 702 const struct sys_reg_desc *r)
711{ 703{
712 if (p->is_write) { 704 if (p->is_write) {
713 vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); 705 vcpu_cp14(vcpu, r->reg) = p->regval;
714 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 706 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
715 } else { 707 } else {
716 *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); 708 p->regval = vcpu_cp14(vcpu, r->reg);
717 } 709 }
718 710
719 return true; 711 return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
731 */ 723 */
732 724
733static inline bool trap_xvr(struct kvm_vcpu *vcpu, 725static inline bool trap_xvr(struct kvm_vcpu *vcpu,
734 const struct sys_reg_params *p, 726 struct sys_reg_params *p,
735 const struct sys_reg_desc *rd) 727 const struct sys_reg_desc *rd)
736{ 728{
737 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 729 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
740 u64 val = *dbg_reg; 732 u64 val = *dbg_reg;
741 733
742 val &= 0xffffffffUL; 734 val &= 0xffffffffUL;
743 val |= *vcpu_reg(vcpu, p->Rt) << 32; 735 val |= p->regval << 32;
744 *dbg_reg = val; 736 *dbg_reg = val;
745 737
746 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 738 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
747 } else { 739 } else {
748 *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; 740 p->regval = *dbg_reg >> 32;
749 } 741 }
750 742
751 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); 743 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
991 * Return 0 if the access has been handled, and -1 if not. 983 * Return 0 if the access has been handled, and -1 if not.
992 */ 984 */
993static int emulate_cp(struct kvm_vcpu *vcpu, 985static int emulate_cp(struct kvm_vcpu *vcpu,
994 const struct sys_reg_params *params, 986 struct sys_reg_params *params,
995 const struct sys_reg_desc *table, 987 const struct sys_reg_desc *table,
996 size_t num) 988 size_t num)
997{ 989{
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1062{ 1054{
1063 struct sys_reg_params params; 1055 struct sys_reg_params params;
1064 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1056 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1057 int Rt = (hsr >> 5) & 0xf;
1065 int Rt2 = (hsr >> 10) & 0xf; 1058 int Rt2 = (hsr >> 10) & 0xf;
1066 1059
1067 params.is_aarch32 = true; 1060 params.is_aarch32 = true;
1068 params.is_32bit = false; 1061 params.is_32bit = false;
1069 params.CRm = (hsr >> 1) & 0xf; 1062 params.CRm = (hsr >> 1) & 0xf;
1070 params.Rt = (hsr >> 5) & 0xf;
1071 params.is_write = ((hsr & 1) == 0); 1063 params.is_write = ((hsr & 1) == 0);
1072 1064
1073 params.Op0 = 0; 1065 params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1076 params.CRn = 0; 1068 params.CRn = 0;
1077 1069
1078 /* 1070 /*
1079 * Massive hack here. Store Rt2 in the top 32bits so we only 1071 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1080 * have one register to deal with. As we use the same trap
1081 * backends between AArch32 and AArch64, we get away with it. 1072 * backends between AArch32 and AArch64, we get away with it.
1082 */ 1073 */
1083 if (params.is_write) { 1074 if (params.is_write) {
1084 u64 val = *vcpu_reg(vcpu, params.Rt); 1075 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1085 val &= 0xffffffff; 1076 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1086 val |= *vcpu_reg(vcpu, Rt2) << 32;
1087 *vcpu_reg(vcpu, params.Rt) = val;
1088 } 1077 }
1089 1078
1090 if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 1079 if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1095 unhandled_cp_access(vcpu, &params); 1084 unhandled_cp_access(vcpu, &params);
1096 1085
1097out: 1086out:
1098 /* Do the opposite hack for the read side */ 1087 /* Split up the value between registers for the read side */
1099 if (!params.is_write) { 1088 if (!params.is_write) {
1100 u64 val = *vcpu_reg(vcpu, params.Rt); 1089 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1101 val >>= 32; 1090 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1102 *vcpu_reg(vcpu, Rt2) = val;
1103 } 1091 }
1104 1092
1105 return 1; 1093 return 1;
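The packing used above, as a standalone sketch: the write path folds the two 32-bit guest registers into one 64-bit params.regval before the trap backend runs, and the read path splits the backend's result again with the standard lower_32_bits()/upper_32_bits() helpers. Rt and Rt2 are the register indices decoded from the HSR, as in the hunk.

	/* write path: guest registers -> one 64-bit trap value */
	params.regval  = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
	params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;

	/* read path: trap result -> back into the guest registers */
	vcpu_set_reg(vcpu, Rt,  lower_32_bits(params.regval));
	vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));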
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1118{ 1106{
1119 struct sys_reg_params params; 1107 struct sys_reg_params params;
1120 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1108 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1109 int Rt = (hsr >> 5) & 0xf;
1121 1110
1122 params.is_aarch32 = true; 1111 params.is_aarch32 = true;
1123 params.is_32bit = true; 1112 params.is_32bit = true;
1124 params.CRm = (hsr >> 1) & 0xf; 1113 params.CRm = (hsr >> 1) & 0xf;
1125 params.Rt = (hsr >> 5) & 0xf; 1114 params.regval = vcpu_get_reg(vcpu, Rt);
1126 params.is_write = ((hsr & 1) == 0); 1115 params.is_write = ((hsr & 1) == 0);
1127 params.CRn = (hsr >> 10) & 0xf; 1116 params.CRn = (hsr >> 10) & 0xf;
1128 params.Op0 = 0; 1117 params.Op0 = 0;
1129 params.Op1 = (hsr >> 14) & 0x7; 1118 params.Op1 = (hsr >> 14) & 0x7;
1130 params.Op2 = (hsr >> 17) & 0x7; 1119 params.Op2 = (hsr >> 17) & 0x7;
1131 1120
1132 if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 1121 if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1133 return 1; 1122 !emulate_cp(vcpu, &params, global, nr_global)) {
1134 if (!emulate_cp(vcpu, &params, global, nr_global)) 1123 if (!params.is_write)
1124 vcpu_set_reg(vcpu, Rt, params.regval);
1135 return 1; 1125 return 1;
1126 }
1136 1127
1137 unhandled_cp_access(vcpu, &params); 1128 unhandled_cp_access(vcpu, &params);
1138 return 1; 1129 return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1175} 1166}
1176 1167
1177static int emulate_sys_reg(struct kvm_vcpu *vcpu, 1168static int emulate_sys_reg(struct kvm_vcpu *vcpu,
1178 const struct sys_reg_params *params) 1169 struct sys_reg_params *params)
1179{ 1170{
1180 size_t num; 1171 size_t num;
1181 const struct sys_reg_desc *table, *r; 1172 const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1230{ 1221{
1231 struct sys_reg_params params; 1222 struct sys_reg_params params;
1232 unsigned long esr = kvm_vcpu_get_hsr(vcpu); 1223 unsigned long esr = kvm_vcpu_get_hsr(vcpu);
1224 int Rt = (esr >> 5) & 0x1f;
1225 int ret;
1233 1226
1234 trace_kvm_handle_sys_reg(esr); 1227 trace_kvm_handle_sys_reg(esr);
1235 1228
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1240 params.CRn = (esr >> 10) & 0xf; 1233 params.CRn = (esr >> 10) & 0xf;
1241 params.CRm = (esr >> 1) & 0xf; 1234 params.CRm = (esr >> 1) & 0xf;
1242 params.Op2 = (esr >> 17) & 0x7; 1235 params.Op2 = (esr >> 17) & 0x7;
1243 params.Rt = (esr >> 5) & 0x1f; 1236 params.regval = vcpu_get_reg(vcpu, Rt);
1244 params.is_write = !(esr & 1); 1237 params.is_write = !(esr & 1);
1245 1238
1246 return emulate_sys_reg(vcpu, &params); 1239 ret = emulate_sys_reg(vcpu, &params);
1240
1241 if (!params.is_write)
1242 vcpu_set_reg(vcpu, Rt, params.regval);
1243 return ret;
1247} 1244}
1248 1245
1249/****************************************************************************** 1246/******************************************************************************
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index eaa324e4db4d..dbbb01cfbee9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -28,7 +28,7 @@ struct sys_reg_params {
28 u8 CRn; 28 u8 CRn;
29 u8 CRm; 29 u8 CRm;
30 u8 Op2; 30 u8 Op2;
31 u8 Rt; 31 u64 regval;
32 bool is_write; 32 bool is_write;
33 bool is_aarch32; 33 bool is_aarch32;
34 bool is_32bit; /* Only valid if is_aarch32 is true */ 34 bool is_32bit; /* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
44 44
45 /* Trapped access from guest, if non-NULL. */ 45 /* Trapped access from guest, if non-NULL. */
46 bool (*access)(struct kvm_vcpu *, 46 bool (*access)(struct kvm_vcpu *,
47 const struct sys_reg_params *, 47 struct sys_reg_params *,
48 const struct sys_reg_desc *); 48 const struct sys_reg_desc *);
49 49
50 /* Initialization for vcpu. */ 50 /* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
77} 77}
78 78
79static inline bool read_zero(struct kvm_vcpu *vcpu, 79static inline bool read_zero(struct kvm_vcpu *vcpu,
80 const struct sys_reg_params *p) 80 struct sys_reg_params *p)
81{ 81{
82 *vcpu_reg(vcpu, p->Rt) = 0; 82 p->regval = 0;
83 return true; 83 return true;
84} 84}
85 85
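With Rt gone from struct sys_reg_params, a trap handler never touches guest registers itself: it reads or writes p->regval, and the common entry code performs the vcpu_get_reg()/vcpu_set_reg() on its behalf. A minimal sketch of the resulting handler shape (the name trap_example is illustrative; the lvalue use of vcpu_sys_reg() matches the helpers of this era):

	static bool trap_example(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *p,
				 const struct sys_reg_desc *r)
	{
		if (p->is_write)
			vcpu_sys_reg(vcpu, r->reg) = p->regval;	/* guest -> state */
		else
			p->regval = vcpu_sys_reg(vcpu, r->reg);	/* state -> guest */
		return true;
	}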
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 1e4576824165..ed90578fa120 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -31,13 +31,13 @@
31#include "sys_regs.h" 31#include "sys_regs.h"
32 32
33static bool access_actlr(struct kvm_vcpu *vcpu, 33static bool access_actlr(struct kvm_vcpu *vcpu,
34 const struct sys_reg_params *p, 34 struct sys_reg_params *p,
35 const struct sys_reg_desc *r) 35 const struct sys_reg_desc *r)
36{ 36{
37 if (p->is_write) 37 if (p->is_write)
38 return ignore_write(vcpu, p); 38 return ignore_write(vcpu, p);
39 39
40 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); 40 p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
41 return true; 41 return true;
42} 42}
43 43
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index f636a2639f03..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
76 __flush_icache_all(); 76 __flush_icache_all();
77} 77}
78 78
79static int is_reserved_asid(u64 asid) 79static bool check_update_reserved_asid(u64 asid, u64 newasid)
80{ 80{
81 int cpu; 81 int cpu;
82 for_each_possible_cpu(cpu) 82 bool hit = false;
83 if (per_cpu(reserved_asids, cpu) == asid) 83
84 return 1; 84 /*
85 return 0; 85 * Iterate over the set of reserved ASIDs looking for a match.
86 * If we find one, then we can update our mm to use newasid
87 * (i.e. the same ASID in the current generation) but we can't
88 * exit the loop early, since we need to ensure that all copies
89 * of the old ASID are updated to reflect the mm. Failure to do
90 * so could result in us missing the reserved ASID in a future
91 * generation.
92 */
93 for_each_possible_cpu(cpu) {
94 if (per_cpu(reserved_asids, cpu) == asid) {
95 hit = true;
96 per_cpu(reserved_asids, cpu) = newasid;
97 }
98 }
99
100 return hit;
86} 101}
87 102
88static u64 new_context(struct mm_struct *mm, unsigned int cpu) 103static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
92 u64 generation = atomic64_read(&asid_generation); 107 u64 generation = atomic64_read(&asid_generation);
93 108
94 if (asid != 0) { 109 if (asid != 0) {
110 u64 newasid = generation | (asid & ~ASID_MASK);
111
95 /* 112 /*
96 * If our current ASID was active during a rollover, we 113 * If our current ASID was active during a rollover, we
97 * can continue to use it and this was just a false alarm. 114 * can continue to use it and this was just a false alarm.
98 */ 115 */
99 if (is_reserved_asid(asid)) 116 if (check_update_reserved_asid(asid, newasid))
100 return generation | (asid & ~ASID_MASK); 117 return newasid;
101 118
102 /* 119 /*
103 * We had a valid ASID in a previous life, so try to re-use 120 * We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
105 */ 122 */
106 asid &= ~ASID_MASK; 123 asid &= ~ASID_MASK;
107 if (!__test_and_set_bit(asid, asid_map)) 124 if (!__test_and_set_bit(asid, asid_map))
108 goto bump_gen; 125 return newasid;
109 } 126 }
110 127
111 /* 128 /*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
129set_asid: 146set_asid:
130 __set_bit(asid, asid_map); 147 __set_bit(asid, asid_map);
131 cur_idx = asid; 148 cur_idx = asid;
132 149 return asid | generation;
133bump_gen:
134 asid |= generation;
135 return asid;
136} 150}
137 151
138void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) 152void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
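The ASID layout that makes the re-tagging above a single OR, as a sketch (the 16-bit width is illustrative; the real width is probed from the CPU): the generation occupies the bits above the hardware ASID, so newasid keeps the same hardware ASID while stamping it with the current generation.

	#define ASID_BITS	16			/* illustrative width */
	#define ASID_MASK	(~0UL << ASID_BITS)	/* generation bits */

	u64 newasid = generation | (asid & ~ASID_MASK);	/* same hw ASID, new gen */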
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/gfp.h> 20#include <linux/gfp.h>
21#include <linux/acpi.h>
21#include <linux/export.h> 22#include <linux/export.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/genalloc.h> 24#include <linux/genalloc.h>
@@ -28,9 +29,6 @@
28 29
29#include <asm/cacheflush.h> 30#include <asm/cacheflush.h>
30 31
31struct dma_map_ops *dma_ops;
32EXPORT_SYMBOL(dma_ops);
33
34static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, 32static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
35 bool coherent) 33 bool coherent)
36{ 34{
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
515 513
516static int __init arm64_dma_init(void) 514static int __init arm64_dma_init(void)
517{ 515{
518 int ret; 516 return atomic_pool_init();
519
520 dma_ops = &swiotlb_dma_ops;
521
522 ret = atomic_pool_init();
523
524 return ret;
525} 517}
526arch_initcall(arm64_dma_init); 518arch_initcall(arm64_dma_init);
527 519
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
552{ 544{
553 bool coherent = is_device_dma_coherent(dev); 545 bool coherent = is_device_dma_coherent(dev);
554 int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); 546 int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
547 size_t iosize = size;
555 void *addr; 548 void *addr;
556 549
557 if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) 550 if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
558 return NULL; 551 return NULL;
552
553 size = PAGE_ALIGN(size);
554
559 /* 555 /*
560 * Some drivers rely on this, and we probably don't want the 556 * Some drivers rely on this, and we probably don't want the
561 * possibility of stale kernel data being read by devices anyway. 557 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
566 struct page **pages; 562 struct page **pages;
567 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); 563 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
568 564
569 pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle, 565 pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
570 flush_page); 566 flush_page);
571 if (!pages) 567 if (!pages)
572 return NULL; 568 return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
574 addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, 570 addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
575 __builtin_return_address(0)); 571 __builtin_return_address(0));
576 if (!addr) 572 if (!addr)
577 iommu_dma_free(dev, pages, size, handle); 573 iommu_dma_free(dev, pages, iosize, handle);
578 } else { 574 } else {
579 struct page *page; 575 struct page *page;
580 /* 576 /*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
591 if (!addr) 587 if (!addr)
592 return NULL; 588 return NULL;
593 589
594 *handle = iommu_dma_map_page(dev, page, 0, size, ioprot); 590 *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
595 if (iommu_dma_mapping_error(dev, *handle)) { 591 if (iommu_dma_mapping_error(dev, *handle)) {
596 if (coherent) 592 if (coherent)
597 __free_pages(page, get_order(size)); 593 __free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
606static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 602static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
607 dma_addr_t handle, struct dma_attrs *attrs) 603 dma_addr_t handle, struct dma_attrs *attrs)
608{ 604{
605 size_t iosize = size;
606
607 size = PAGE_ALIGN(size);
609 /* 608 /*
610 * @cpu_addr will be one of 3 things depending on how it was allocated: 609 * @cpu_addr will be one of 3 things depending on how it was allocated:
611 * - A remapped array of pages from iommu_dma_alloc(), for all 610 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
617 * Hence how dodgy the below logic looks... 616 * Hence how dodgy the below logic looks...
618 */ 617 */
619 if (__in_atomic_pool(cpu_addr, size)) { 618 if (__in_atomic_pool(cpu_addr, size)) {
620 iommu_dma_unmap_page(dev, handle, size, 0, NULL); 619 iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
621 __free_from_pool(cpu_addr, size); 620 __free_from_pool(cpu_addr, size);
622 } else if (is_vmalloc_addr(cpu_addr)){ 621 } else if (is_vmalloc_addr(cpu_addr)){
623 struct vm_struct *area = find_vm_area(cpu_addr); 622 struct vm_struct *area = find_vm_area(cpu_addr);
624 623
625 if (WARN_ON(!area || !area->pages)) 624 if (WARN_ON(!area || !area->pages))
626 return; 625 return;
627 iommu_dma_free(dev, area->pages, size, &handle); 626 iommu_dma_free(dev, area->pages, iosize, &handle);
628 dma_common_free_remap(cpu_addr, size, VM_USERMAP); 627 dma_common_free_remap(cpu_addr, size, VM_USERMAP);
629 } else { 628 } else {
630 iommu_dma_unmap_page(dev, handle, size, 0, NULL); 629 iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
631 __free_pages(virt_to_page(cpu_addr), get_order(size)); 630 __free_pages(virt_to_page(cpu_addr), get_order(size));
632 } 631 }
633} 632}
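The iosize/size split above keeps the two halves of the allocator symmetric: every IOMMU map, unmap and free call sees the caller's original size, while the CPU-side page and remap helpers see a page-aligned one, so a later free with the same original size always matches the mapping it undoes.

	size_t iosize = size;		/* passed to iommu_dma_map_page()/iommu_dma_free() */
	size = PAGE_ALIGN(size);	/* passed to __free_pages() and the remap helpers */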
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
984void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 983void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
985 struct iommu_ops *iommu, bool coherent) 984 struct iommu_ops *iommu, bool coherent)
986{ 985{
987 if (!acpi_disabled && !dev->archdata.dma_ops) 986 if (!dev->archdata.dma_ops)
988 dev->archdata.dma_ops = dma_ops; 987 dev->archdata.dma_ops = &swiotlb_dma_ops;
989 988
990 dev->archdata.dma_coherent = coherent; 989 dev->archdata.dma_coherent = coherent;
991 __iommu_setup_dma_ops(dev, dma_base, size, iommu); 990 __iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 19211c4a8911..92ddac1e8ca2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -393,16 +393,16 @@ static struct fault_info {
393 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, 393 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
394 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, 394 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
395 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, 395 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
396 { do_bad, SIGBUS, 0, "reserved access flag fault" }, 396 { do_bad, SIGBUS, 0, "unknown 8" },
397 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, 397 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
398 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 398 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
399 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, 399 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
400 { do_bad, SIGBUS, 0, "reserved permission fault" }, 400 { do_bad, SIGBUS, 0, "unknown 12" },
401 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, 401 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
402 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, 402 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
403 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, 403 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
404 { do_bad, SIGBUS, 0, "synchronous external abort" }, 404 { do_bad, SIGBUS, 0, "synchronous external abort" },
405 { do_bad, SIGBUS, 0, "asynchronous external abort" }, 405 { do_bad, SIGBUS, 0, "unknown 17" },
406 { do_bad, SIGBUS, 0, "unknown 18" }, 406 { do_bad, SIGBUS, 0, "unknown 18" },
407 { do_bad, SIGBUS, 0, "unknown 19" }, 407 { do_bad, SIGBUS, 0, "unknown 19" },
408 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, 408 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
410 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, 410 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
411 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, 411 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
412 { do_bad, SIGBUS, 0, "synchronous parity error" }, 412 { do_bad, SIGBUS, 0, "synchronous parity error" },
413 { do_bad, SIGBUS, 0, "asynchronous parity error" }, 413 { do_bad, SIGBUS, 0, "unknown 25" },
414 { do_bad, SIGBUS, 0, "unknown 26" }, 414 { do_bad, SIGBUS, 0, "unknown 26" },
415 { do_bad, SIGBUS, 0, "unknown 27" }, 415 { do_bad, SIGBUS, 0, "unknown 27" },
416 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 416 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
417 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 417 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
420 { do_bad, SIGBUS, 0, "unknown 32" }, 420 { do_bad, SIGBUS, 0, "unknown 32" },
421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, 421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
422 { do_bad, SIGBUS, 0, "debug event" }, 422 { do_bad, SIGBUS, 0, "unknown 34" },
423 { do_bad, SIGBUS, 0, "unknown 35" }, 423 { do_bad, SIGBUS, 0, "unknown 35" },
424 { do_bad, SIGBUS, 0, "unknown 36" }, 424 { do_bad, SIGBUS, 0, "unknown 36" },
425 { do_bad, SIGBUS, 0, "unknown 37" }, 425 { do_bad, SIGBUS, 0, "unknown 37" },
@@ -433,21 +433,21 @@ static struct fault_info {
433 { do_bad, SIGBUS, 0, "unknown 45" }, 433 { do_bad, SIGBUS, 0, "unknown 45" },
434 { do_bad, SIGBUS, 0, "unknown 46" }, 434 { do_bad, SIGBUS, 0, "unknown 46" },
435 { do_bad, SIGBUS, 0, "unknown 47" }, 435 { do_bad, SIGBUS, 0, "unknown 47" },
436 { do_bad, SIGBUS, 0, "unknown 48" }, 436 { do_bad, SIGBUS, 0, "TLB conflict abort" },
437 { do_bad, SIGBUS, 0, "unknown 49" }, 437 { do_bad, SIGBUS, 0, "unknown 49" },
438 { do_bad, SIGBUS, 0, "unknown 50" }, 438 { do_bad, SIGBUS, 0, "unknown 50" },
439 { do_bad, SIGBUS, 0, "unknown 51" }, 439 { do_bad, SIGBUS, 0, "unknown 51" },
440 { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, 440 { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
441 { do_bad, SIGBUS, 0, "unknown 53" }, 441 { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
442 { do_bad, SIGBUS, 0, "unknown 54" }, 442 { do_bad, SIGBUS, 0, "unknown 54" },
443 { do_bad, SIGBUS, 0, "unknown 55" }, 443 { do_bad, SIGBUS, 0, "unknown 55" },
444 { do_bad, SIGBUS, 0, "unknown 56" }, 444 { do_bad, SIGBUS, 0, "unknown 56" },
445 { do_bad, SIGBUS, 0, "unknown 57" }, 445 { do_bad, SIGBUS, 0, "unknown 57" },
446 { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, 446 { do_bad, SIGBUS, 0, "unknown 58" },
447 { do_bad, SIGBUS, 0, "unknown 59" }, 447 { do_bad, SIGBUS, 0, "unknown 59" },
448 { do_bad, SIGBUS, 0, "unknown 60" }, 448 { do_bad, SIGBUS, 0, "unknown 60" },
449 { do_bad, SIGBUS, 0, "unknown 61" }, 449 { do_bad, SIGBUS, 0, "section domain fault" },
450 { do_bad, SIGBUS, 0, "unknown 62" }, 450 { do_bad, SIGBUS, 0, "page domain fault" },
451 { do_bad, SIGBUS, 0, "unknown 63" }, 451 { do_bad, SIGBUS, 0, "unknown 63" },
452}; 452};
453 453
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..873e363048c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
64 64
65static void __init *early_alloc(unsigned long sz) 65static void __init *early_alloc(unsigned long sz)
66{ 66{
67 void *ptr = __va(memblock_alloc(sz, sz)); 67 phys_addr_t phys;
68 BUG_ON(!ptr); 68 void *ptr;
69
70 phys = memblock_alloc(sz, sz);
71 BUG_ON(!phys);
72 ptr = __va(phys);
69 memset(ptr, 0, sz); 73 memset(ptr, 0, sz);
70 return ptr; 74 return ptr;
71} 75}
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
81 do { 85 do {
82 /* 86 /*
83 * Need to have the least restrictive permissions available 87 * Need to have the least restrictive permissions available
84 * permissions will be fixed up later. Default the new page 88 * permissions will be fixed up later
85 * range as contiguous ptes.
86 */ 89 */
87 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); 90 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
88 pfn++; 91 pfn++;
89 } while (pte++, i++, i < PTRS_PER_PTE); 92 } while (pte++, i++, i < PTRS_PER_PTE);
90} 93}
91 94
92/*
93 * Given a PTE with the CONT bit set, determine where the CONT range
94 * starts, and clear the entire range of PTE CONT bits.
95 */
96static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
97{
98 int i;
99
100 pte -= CONT_RANGE_OFFSET(addr);
101 for (i = 0; i < CONT_PTES; i++) {
102 set_pte(pte, pte_mknoncont(*pte));
103 pte++;
104 }
105 flush_tlb_all();
106}
107
108/*
109 * Given a range of PTEs set the pfn and provided page protection flags
110 */
111static void __populate_init_pte(pte_t *pte, unsigned long addr,
112 unsigned long end, phys_addr_t phys,
113 pgprot_t prot)
114{
115 unsigned long pfn = __phys_to_pfn(phys);
116
117 do {
118 /* clear all the bits except the pfn, then apply the prot */
119 set_pte(pte, pfn_pte(pfn, prot));
120 pte++;
121 pfn++;
122 addr += PAGE_SIZE;
123 } while (addr != end);
124}
125
126static void alloc_init_pte(pmd_t *pmd, unsigned long addr, 95static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
127 unsigned long end, phys_addr_t phys, 96 unsigned long end, unsigned long pfn,
128 pgprot_t prot, 97 pgprot_t prot,
129 void *(*alloc)(unsigned long size)) 98 void *(*alloc)(unsigned long size))
130{ 99{
131 pte_t *pte; 100 pte_t *pte;
132 unsigned long next;
133 101
134 if (pmd_none(*pmd) || pmd_sect(*pmd)) { 102 if (pmd_none(*pmd) || pmd_sect(*pmd)) {
135 pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); 103 pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
142 110
143 pte = pte_offset_kernel(pmd, addr); 111 pte = pte_offset_kernel(pmd, addr);
144 do { 112 do {
145 next = min(end, (addr + CONT_SIZE) & CONT_MASK); 113 set_pte(pte, pfn_pte(pfn, prot));
146 if (((addr | next | phys) & ~CONT_MASK) == 0) { 114 pfn++;
147 /* a block of CONT_PTES */ 115 } while (pte++, addr += PAGE_SIZE, addr != end);
148 __populate_init_pte(pte, addr, next, phys,
149 __pgprot(pgprot_val(prot) | PTE_CONT));
150 } else {
151 /*
152 * If the range being split is already inside of a
153 * contiguous range but this PTE isn't going to be
154 * contiguous, then we want to unmark the adjacent
155 * ranges, then update the portion of the range we
156 * are interested in.
157 */
158 clear_cont_pte_range(pte, addr);
159 __populate_init_pte(pte, addr, next, phys, prot);
160 }
161
162 pte += (next - addr) >> PAGE_SHIFT;
163 phys += next - addr;
164 addr = next;
165 } while (addr != end);
166} 116}
167 117
168static void split_pud(pud_t *old_pud, pmd_t *pmd) 118static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
223 } 173 }
224 } 174 }
225 } else { 175 } else {
226 alloc_init_pte(pmd, addr, next, phys, prot, alloc); 176 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
177 prot, alloc);
227 } 178 }
228 phys += next - addr; 179 phys += next - addr;
229 } while (pmd++, addr = next, addr != end); 180 } while (pmd++, addr = next, addr != end);
@@ -362,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
362 * for now. This will get more fine grained later once all memory 313 * for now. This will get more fine grained later once all memory
363 * is mapped 314 * is mapped
364 */ 315 */
365 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 316 unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
366 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 317 unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
367 318
368 if (end < kernel_x_start) { 319 if (end < kernel_x_start) {
369 create_mapping(start, __phys_to_virt(start), 320 create_mapping(start, __phys_to_virt(start),
@@ -451,18 +402,18 @@ static void __init fixup_executable(void)
451{ 402{
452#ifdef CONFIG_DEBUG_RODATA 403#ifdef CONFIG_DEBUG_RODATA
453 /* now that we are actually fully mapped, make the start/end more fine grained */ 404 /* now that we are actually fully mapped, make the start/end more fine grained */
454 if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { 405 if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
455 unsigned long aligned_start = round_down(__pa(_stext), 406 unsigned long aligned_start = round_down(__pa(_stext),
456 SECTION_SIZE); 407 SWAPPER_BLOCK_SIZE);
457 408
458 create_mapping(aligned_start, __phys_to_virt(aligned_start), 409 create_mapping(aligned_start, __phys_to_virt(aligned_start),
459 __pa(_stext) - aligned_start, 410 __pa(_stext) - aligned_start,
460 PAGE_KERNEL); 411 PAGE_KERNEL);
461 } 412 }
462 413
463 if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { 414 if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
464 unsigned long aligned_end = round_up(__pa(__init_end), 415 unsigned long aligned_end = round_up(__pa(__init_end),
465 SECTION_SIZE); 416 SWAPPER_BLOCK_SIZE);
466 create_mapping(__pa(__init_end), (unsigned long)__init_end, 417 create_mapping(__pa(__init_end), (unsigned long)__init_end,
467 aligned_end - __pa(__init_end), 418 aligned_end - __pa(__init_end),
468 PAGE_KERNEL); 419 PAGE_KERNEL);
@@ -475,7 +426,7 @@ void mark_rodata_ro(void)
475{ 426{
476 create_mapping_late(__pa(_stext), (unsigned long)_stext, 427 create_mapping_late(__pa(_stext), (unsigned long)_stext,
477 (unsigned long)_etext - (unsigned long)_stext, 428 (unsigned long)_etext - (unsigned long)_stext,
478 PAGE_KERNEL_EXEC | PTE_RDONLY); 429 PAGE_KERNEL_ROX);
479 430
480} 431}
481#endif 432#endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cf3c7d4a1b58..b162ad70effc 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -50,7 +50,7 @@ static const int bpf2a64[] = {
50 [BPF_REG_8] = A64_R(21), 50 [BPF_REG_8] = A64_R(21),
51 [BPF_REG_9] = A64_R(22), 51 [BPF_REG_9] = A64_R(22),
52 /* read-only frame pointer to access stack */ 52 /* read-only frame pointer to access stack */
53 [BPF_REG_FP] = A64_FP, 53 [BPF_REG_FP] = A64_R(25),
54 /* temporary register for internal BPF JIT */ 54 /* temporary register for internal BPF JIT */
55 [TMP_REG_1] = A64_R(23), 55 [TMP_REG_1] = A64_R(23),
56 [TMP_REG_2] = A64_R(24), 56 [TMP_REG_2] = A64_R(24),
@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
139/* Stack must be multiples of 16B */ 139/* Stack must be multiples of 16B */
140#define STACK_ALIGN(sz) (((sz) + 15) & ~15) 140#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
141 141
142#define _STACK_SIZE \
143 (MAX_BPF_STACK \
144 + 4 /* extra for skb_copy_bits buffer */)
145
146#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
147
142static void build_prologue(struct jit_ctx *ctx) 148static void build_prologue(struct jit_ctx *ctx)
143{ 149{
144 const u8 r6 = bpf2a64[BPF_REG_6]; 150 const u8 r6 = bpf2a64[BPF_REG_6];
@@ -150,10 +156,35 @@ static void build_prologue(struct jit_ctx *ctx)
150 const u8 rx = bpf2a64[BPF_REG_X]; 156 const u8 rx = bpf2a64[BPF_REG_X];
151 const u8 tmp1 = bpf2a64[TMP_REG_1]; 157 const u8 tmp1 = bpf2a64[TMP_REG_1];
152 const u8 tmp2 = bpf2a64[TMP_REG_2]; 158 const u8 tmp2 = bpf2a64[TMP_REG_2];
153 int stack_size = MAX_BPF_STACK;
154 159
155 stack_size += 4; /* extra for skb_copy_bits buffer */ 160 /*
156 stack_size = STACK_ALIGN(stack_size); 161 * BPF prog stack layout
162 *
163 * high
164 * original A64_SP => 0:+-----+ BPF prologue
165 * |FP/LR|
166 * current A64_FP => -16:+-----+
167 * | ... | callee saved registers
168 * +-----+
169 * | | x25/x26
170 * BPF fp register => -80:+-----+ <= (BPF_FP)
171 * | |
172 * | ... | BPF prog stack
173 * | |
174 * +-----+ <= (BPF_FP - MAX_BPF_STACK)
175 * |RSVD | JIT scratchpad
176 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
177 * | |
178 * | ... | Function call stack
179 * | |
180 * +-----+
181 * low
182 *
183 */
184
185 /* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
186 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
187 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
157 188
158 /* Save callee-saved register */ 189 /* Save callee-saved register */
159 emit(A64_PUSH(r6, r7, A64_SP), ctx); 190 emit(A64_PUSH(r6, r7, A64_SP), ctx);
@@ -161,12 +192,15 @@ static void build_prologue(struct jit_ctx *ctx)
161 if (ctx->tmp_used) 192 if (ctx->tmp_used)
162 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); 193 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
163 194
164 /* Set up BPF stack */ 195 /* Save fp (x25) and x26. SP requires 16 bytes alignment */
165 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); 196 emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
166 197
167 /* Set up frame pointer */ 198 /* Set up BPF prog stack base register (x25) */
168 emit(A64_MOV(1, fp, A64_SP), ctx); 199 emit(A64_MOV(1, fp, A64_SP), ctx);
169 200
201 /* Set up function call stack */
202 emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
203
170 /* Clear registers A and X */ 204 /* Clear registers A and X */
171 emit_a64_mov_i64(ra, 0, ctx); 205 emit_a64_mov_i64(ra, 0, ctx);
172 emit_a64_mov_i64(rx, 0, ctx); 206 emit_a64_mov_i64(rx, 0, ctx);
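A worked instance of the reservation above, assuming MAX_BPF_STACK is 512 bytes (its value at the time): 512 plus 4 bytes of skb_copy_bits() scratch gives 516, and rounding up to the required 16-byte multiple yields 528 bytes carved out below the BPF frame pointer in x25.

	#define STACK_ALIGN(sz)	(((sz) + 15) & ~15)
	#define _STACK_SIZE	(512 + 4)			/* MAX_BPF_STACK + scratch */
	#define STACK_SIZE	STACK_ALIGN(_STACK_SIZE)	/* == 528 */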
@@ -182,13 +216,12 @@ static void build_epilogue(struct jit_ctx *ctx)
182 const u8 fp = bpf2a64[BPF_REG_FP]; 216 const u8 fp = bpf2a64[BPF_REG_FP];
183 const u8 tmp1 = bpf2a64[TMP_REG_1]; 217 const u8 tmp1 = bpf2a64[TMP_REG_1];
184 const u8 tmp2 = bpf2a64[TMP_REG_2]; 218 const u8 tmp2 = bpf2a64[TMP_REG_2];
185 int stack_size = MAX_BPF_STACK;
186
187 stack_size += 4; /* extra for skb_copy_bits buffer */
188 stack_size = STACK_ALIGN(stack_size);
189 219
190 /* We're done with BPF stack */ 220 /* We're done with BPF stack */
191 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); 221 emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
222
223 /* Restore fp (x25) and x26 */
224 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
192 225
193 /* Restore callee-saved register */ 226 /* Restore callee-saved register */
194 if (ctx->tmp_used) 227 if (ctx->tmp_used)
@@ -196,8 +229,8 @@ static void build_epilogue(struct jit_ctx *ctx)
196 emit(A64_POP(r8, r9, A64_SP), ctx); 229 emit(A64_POP(r8, r9, A64_SP), ctx);
197 emit(A64_POP(r6, r7, A64_SP), ctx); 230 emit(A64_POP(r6, r7, A64_SP), ctx);
198 231
199 /* Restore frame pointer */ 232 /* Restore FP/LR registers */
200 emit(A64_MOV(1, fp, A64_SP), ctx); 233 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
201 234
202 /* Set return value */ 235 /* Set return value */
203 emit(A64_MOV(1, A64_R(0), r0), ctx); 236 emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -557,7 +590,25 @@ emit_cond_jmp:
557 case BPF_ST | BPF_MEM | BPF_H: 590 case BPF_ST | BPF_MEM | BPF_H:
558 case BPF_ST | BPF_MEM | BPF_B: 591 case BPF_ST | BPF_MEM | BPF_B:
559 case BPF_ST | BPF_MEM | BPF_DW: 592 case BPF_ST | BPF_MEM | BPF_DW:
560 goto notyet; 593 /* Load imm to a register then store it */
594 ctx->tmp_used = 1;
595 emit_a64_mov_i(1, tmp2, off, ctx);
596 emit_a64_mov_i(1, tmp, imm, ctx);
597 switch (BPF_SIZE(code)) {
598 case BPF_W:
599 emit(A64_STR32(tmp, dst, tmp2), ctx);
600 break;
601 case BPF_H:
602 emit(A64_STRH(tmp, dst, tmp2), ctx);
603 break;
604 case BPF_B:
605 emit(A64_STRB(tmp, dst, tmp2), ctx);
606 break;
607 case BPF_DW:
608 emit(A64_STR64(tmp, dst, tmp2), ctx);
609 break;
610 }
611 break;
561 612
562 /* STX: *(size *)(dst + off) = src */ 613 /* STX: *(size *)(dst + off) = src */
563 case BPF_STX | BPF_MEM | BPF_W: 614 case BPF_STX | BPF_MEM | BPF_W:
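What the new BPF_ST case emits, written as the equivalent C (a sketch: dst is read here as the destination address held in the BPF register, and off/imm are the decoded instruction fields). The immediate is staged through a scratch register because AArch64 store instructions only take register operands.

	switch (BPF_SIZE(code)) {
	case BPF_W:  *(u32 *)(dst + off) = imm; break;	/* 32-bit store */
	case BPF_H:  *(u16 *)(dst + off) = imm; break;	/* 16-bit store */
	case BPF_B:  *(u8  *)(dst + off) = imm; break;	/*  8-bit store */
	case BPF_DW: *(u64 *)(dst + off) = imm; break;	/* 64-bit store */
	}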
@@ -624,7 +675,7 @@ emit_cond_jmp:
624 return -EINVAL; 675 return -EINVAL;
625 } 676 }
626 emit_a64_mov_i64(r3, size, ctx); 677 emit_a64_mov_i64(r3, size, ctx);
627 emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); 678 emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
628 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); 679 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
629 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 680 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
630 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 681 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
@@ -758,7 +809,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
758 if (bpf_jit_enable > 1) 809 if (bpf_jit_enable > 1)
759 bpf_jit_dump(prog->len, image_size, 2, ctx.image); 810 bpf_jit_dump(prog->len, image_size, 2, ctx.image);
760 811
761 bpf_flush_icache(ctx.image, ctx.image + ctx.idx); 812 bpf_flush_icache(header, ctx.image + ctx.idx);
762 813
763 set_memory_ro((unsigned long)header, header->pages); 814 set_memory_ro((unsigned long)header, header->pages);
764 prog->bpf_func = (void *)ctx.image; 815 prog->bpf_func = (void *)ctx.image;
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index f7836c6a6b60..c32f76791f48 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void)
98 memstart = PAGE_ALIGN(_ramstart); 98 memstart = PAGE_ALIGN(_ramstart);
99 min_low_pfn = PFN_DOWN(_rambase); 99 min_low_pfn = PFN_DOWN(_rambase);
100 start_pfn = PFN_DOWN(memstart); 100 start_pfn = PFN_DOWN(memstart);
101 max_low_pfn = PFN_DOWN(_ramend); 101 max_pfn = max_low_pfn = PFN_DOWN(_ramend);
102 high_memory = (void *)_ramend; 102 high_memory = (void *)_ramend;
103 103
104 m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; 104 m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 0793a7f17417..f9d96bf86910 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 375 7#define NR_syscalls 376
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 5e6fae6c275f..36cf129de663 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -380,5 +380,6 @@
380#define __NR_sendmmsg 372 380#define __NR_sendmmsg 372
381#define __NR_userfaultfd 373 381#define __NR_userfaultfd 373
382#define __NR_membarrier 374 382#define __NR_membarrier 374
383#define __NR_mlock2 375
383 384
384#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 385#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 88c27d94a721..76b9113f3092 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p)
238 * Give all the memory to the bootmap allocator, tell it to put the 238 * Give all the memory to the bootmap allocator, tell it to put the
239 * boot mem_map at the start of memory. 239 * boot mem_map at the start of memory.
240 */ 240 */
241 min_low_pfn = PFN_DOWN(memory_start);
242 max_pfn = max_low_pfn = PFN_DOWN(memory_end);
243
241 bootmap_size = init_bootmem_node( 244 bootmap_size = init_bootmem_node(
242 NODE_DATA(0), 245 NODE_DATA(0),
243 memory_start >> PAGE_SHIFT, /* map goes here */ 246 min_low_pfn, /* map goes here */
244 PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ 247 PFN_DOWN(PAGE_OFFSET),
245 memory_end >> PAGE_SHIFT); 248 max_pfn);
246 /* 249 /*
247 * Free the usable memory, we have to make sure we do not free 250 * Free the usable memory, we have to make sure we do not free
248 * the bootmem bitmap so we then reserve it after freeing it :-) 251 * the bootmem bitmap so we then reserve it after freeing it :-)
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 5dd0e80042f5..282cd903f4c4 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -395,3 +395,4 @@ ENTRY(sys_call_table)
395 .long sys_sendmmsg 395 .long sys_sendmmsg
396 .long sys_userfaultfd 396 .long sys_userfaultfd
397 .long sys_membarrier 397 .long sys_membarrier
398 .long sys_mlock2 /* 375 */
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index b958916e5eac..8f37fdd80be9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -250,7 +250,7 @@ void __init paging_init(void)
250 high_memory = phys_to_virt(max_addr); 250 high_memory = phys_to_virt(max_addr);
251 251
252 min_low_pfn = availmem >> PAGE_SHIFT; 252 min_low_pfn = availmem >> PAGE_SHIFT;
253 max_low_pfn = max_addr >> PAGE_SHIFT; 253 max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
254 254
255 for (i = 0; i < m68k_num_memory; i++) { 255 for (i = 0; i < m68k_num_memory; i++) {
256 addr = m68k_memory[i].addr; 256 addr = m68k_memory[i].addr;
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index a8b942bf7163..2a5f43a68ae3 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
118 memory_end = memory_end & PAGE_MASK; 118 memory_end = memory_end & PAGE_MASK;
119 119
120 start_page = __pa(memory_start) >> PAGE_SHIFT; 120 start_page = __pa(memory_start) >> PAGE_SHIFT;
121 num_pages = __pa(memory_end) >> PAGE_SHIFT; 121 max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT;
122 122
123 high_memory = (void *)memory_end; 123 high_memory = (void *)memory_end;
124 availmem = memory_start; 124 availmem = memory_start;
125 125
126 m68k_setup_node(0); 126 m68k_setup_node(0);
127 availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); 127 availmem += init_bootmem(start_page, num_pages);
128 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; 128 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
129 129
130 free_bootmem(__pa(availmem), memory_end - (availmem)); 130 free_bootmem(__pa(availmem), memory_end - (availmem));
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 1ba21204ebe0..8755d618e116 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
216 AR71XX_RESET_SIZE); 216 AR71XX_RESET_SIZE);
217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, 217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
218 AR71XX_PLL_SIZE); 218 AR71XX_PLL_SIZE);
219 ath79_detect_sys_type();
219 ath79_ddr_ctrl_init(); 220 ath79_ddr_ctrl_init();
220 221
221 ath79_detect_sys_type();
222 if (mips_machtype != ATH79_MACH_GENERIC_OF) 222 if (mips_machtype != ATH79_MACH_GENERIC_OF)
223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); 223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
224 224
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
281 "Generic", 281 "Generic",
282 "Generic AR71XX/AR724X/AR913X based board", 282 "Generic AR71XX/AR724X/AR913X based board",
283 ath79_generic_init); 283 ath79_generic_init);
284
285MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
286 "DTB",
287 "Generic AR71XX/AR724X/AR913X based board (DT)",
288 NULL);
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index fb7734eadbf0..13d0439496a9 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -107,7 +107,7 @@
107 miscintc: interrupt-controller@18060010 { 107 miscintc: interrupt-controller@18060010 {
108 compatible = "qca,ar9132-misc-intc", 108 compatible = "qca,ar9132-misc-intc",
109 "qca,ar7100-misc-intc"; 109 "qca,ar7100-misc-intc";
110 reg = <0x18060010 0x4>; 110 reg = <0x18060010 0x8>;
111 111
112 interrupt-parent = <&cpuintc>; 112 interrupt-parent = <&cpuintc>;
113 interrupts = <6>; 113 interrupts = <6>;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad1fccdb8d13..2046c0230224 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
200{ 200{
201 /* avoid <linux/mm.h> include hell */ 201 /* avoid <linux/mm.h> include hell */
202 extern unsigned long max_mapnr; 202 extern unsigned long max_mapnr;
203 unsigned long pfn_offset = ARCH_PFN_OFFSET;
203 204
204 return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; 205 return pfn >= pfn_offset && pfn < max_mapnr;
205} 206}
206 207
207#elif defined(CONFIG_SPARSEMEM) 208#elif defined(CONFIG_SPARSEMEM)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d5fa3eaf39a1..41b1b090f56f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1581 1581
1582 base = (inst >> 21) & 0x1f; 1582 base = (inst >> 21) & 0x1f;
1583 op_inst = (inst >> 16) & 0x1f; 1583 op_inst = (inst >> 16) & 0x1f;
1584 offset = inst & 0xffff; 1584 offset = (int16_t)inst;
1585 cache = (inst >> 16) & 0x3; 1585 cache = (inst >> 16) & 0x3;
1586 op = (inst >> 18) & 0x7; 1586 op = (inst >> 18) & 0x7;
1587 1587
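Why the cast matters: the old masking zero-extended the 16-bit displacement, so a negative offset in a guest cache instruction was treated as a large positive one; casting through int16_t sign-extends it. A sketch with a hypothetical encoding:

	uint32_t inst = 0xbc90fff8;	/* hypothetical cache op, offset -8 */
	int32_t bad  = inst & 0xffff;	/* 0xfff8 == 65528: wrong */
	int32_t good = (int16_t)inst;	/* -8: matches the hardware */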
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 7bab3a4e8f7d..7e2210846b8b 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
157 157
158FEXPORT(__kvm_mips_load_asid) 158FEXPORT(__kvm_mips_load_asid)
159 /* Set the ASID for the Guest Kernel */ 159 /* Set the ASID for the Guest Kernel */
160 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 160 PTR_L t0, VCPU_COP0(k1)
161 /* addresses shift to 0x80000000 */ 161 LONG_L t0, COP0_STATUS(t0)
162 bltz t0, 1f /* If kernel */ 162 andi t0, KSU_USER | ST0_ERL | ST0_EXL
163 xori t0, KSU_USER
164 bnez t0, 1f /* If kernel */
163 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
164 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1651: 1671:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
474 mtc0 t0, CP0_EPC 476 mtc0 t0, CP0_EPC
475 477
476 /* Set the ASID for the Guest Kernel */ 478 /* Set the ASID for the Guest Kernel */
477 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 479 PTR_L t0, VCPU_COP0(k1)
478 /* addresses shift to 0x80000000 */ 480 LONG_L t0, COP0_STATUS(t0)
479 bltz t0, 1f /* If kernel */ 481 andi t0, KSU_USER | ST0_ERL | ST0_EXL
482 xori t0, KSU_USER
483 bnez t0, 1f /* If kernel */
480 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 484 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
481 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 485 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
4821: 4861:
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 49ff3bfc007e..b9b803facdbf 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
279 279
280 if (!gebase) { 280 if (!gebase) {
281 err = -ENOMEM; 281 err = -ENOMEM;
282 goto out_free_cpu; 282 goto out_uninit_cpu;
283 } 283 }
284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", 284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
285 ALIGN(size, PAGE_SIZE), gebase); 285 ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
343out_free_gebase: 343out_free_gebase:
344 kfree(gebase); 344 kfree(gebase);
345 345
346out_uninit_cpu:
347 kvm_vcpu_uninit(vcpu);
348
346out_free_cpu: 349out_free_cpu:
347 kfree(vcpu); 350 kfree(vcpu);
348 351
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index 8a978022630b..dbbeccc3d714 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -11,6 +11,7 @@
11 * by the Free Software Foundation. 11 * by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/delay.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/io.h> 17#include <linux/io.h>
@@ -232,8 +233,7 @@ static int rt288x_pci_probe(struct platform_device *pdev)
232 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; 233 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
233 234
234 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); 235 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
235 for (i = 0; i < 0xfffff; i++) 236 udelay(1);
236 ;
237 237
238 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); 238 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
239 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); 239 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 4f925e06c414..78b2ef49dbc7 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -10,6 +10,8 @@
10 * option) any later version. 10 * option) any later version.
11 */ 11 */
12 12
13#include <linux/delay.h>
14
13#include <asm/bootinfo.h> 15#include <asm/bootinfo.h>
14#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
15#include <asm/idle.h> 17#include <asm/idle.h>
@@ -77,7 +79,7 @@ void msp7120_reset(void)
77 */ 79 */
78 80
79 /* Wait a bit for the DDRC to settle */ 81 /* Wait a bit for the DDRC to settle */
80 for (i = 0; i < 100000000; i++); 82 mdelay(125);
81 83
82#if defined(CONFIG_PMC_MSP7120_GW) 84#if defined(CONFIG_PMC_MSP7120_GW)
83 /* 85 /*
diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c
index 244f9427625b..db8f88b6a3af 100644
--- a/arch/mips/sni/reset.c
+++ b/arch/mips/sni/reset.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * Reset a SNI machine. 4 * Reset a SNI machine.
5 */ 5 */
6#include <linux/delay.h>
7
6#include <asm/io.h> 8#include <asm/io.h>
7#include <asm/reboot.h> 9#include <asm/reboot.h>
8#include <asm/sni.h> 10#include <asm/sni.h>
@@ -32,9 +34,9 @@ void sni_machine_restart(char *command)
32 for (;;) { 34 for (;;) {
33 for (i = 0; i < 100; i++) { 35 for (i = 0; i < 100; i++) {
34 kb_wait(); 36 kb_wait();
35 for (j = 0; j < 100000 ; j++) 37 udelay(50);
36 /* nothing */;
37 outb_p(0xfe, 0x64); /* pulse reset low */ 38 outb_p(0xfe, 0x64); /* pulse reset low */
39 udelay(50);
38 } 40 }
39 } 41 }
40} 42}
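All three delay fixes in this series (pci-rt2880, msp_setup, sni/reset) share the same rationale: a counted loop with an empty body has no side effects, so the compiler is free to delete it outright, and even when it survives, its duration bears no fixed relation to wall-clock time. The calibrated delay helpers from <linux/delay.h> are the reliable replacement.

	for (j = 0; j < 100000; j++)
		;		/* no volatile, no barrier: may be optimized away */

	udelay(50);		/* calibrated busy-wait with a known duration */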
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 4434b54e1d87..78ae5552fdb8 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
1config MN10300 1config MN10300
2 def_bool y 2 def_bool y
3 select HAVE_OPROFILE 3 select HAVE_OPROFILE
4 select HAVE_UID16
4 select GENERIC_IRQ_SHOW 5 select GENERIC_IRQ_SHOW
5 select ARCH_WANT_IPC_PARSE_VERSION 6 select ARCH_WANT_IPC_PARSE_VERSION
6 select HAVE_ARCH_TRACEHOOK 7 select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
37config NUMA 38config NUMA
38 def_bool n 39 def_bool n
39 40
40config UID16
41 def_bool y
42
43config RWSEM_GENERIC_SPINLOCK 41config RWSEM_GENERIC_SPINLOCK
44 def_bool y 42 def_bool y
45 43
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 223cdcc8203f..87bf88ed04c6 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end)
23 end += (cpuinfo.dcache_line_size - 1); 23 end += (cpuinfo.dcache_line_size - 1);
24 end &= ~(cpuinfo.dcache_line_size - 1); 24 end &= ~(cpuinfo.dcache_line_size - 1);
25 25
26 for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
27 __asm__ __volatile__ (" flushda 0(%0)\n"
28 : /* Outputs */
29 : /* Inputs */ "r"(addr)
30 /* : No clobber */);
31 }
32}
33
34static void __flush_dcache_all(unsigned long start, unsigned long end)
35{
36 unsigned long addr;
37
38 start &= ~(cpuinfo.dcache_line_size - 1);
39 end += (cpuinfo.dcache_line_size - 1);
40 end &= ~(cpuinfo.dcache_line_size - 1);
41
42 if (end > start + cpuinfo.dcache_size) 26 if (end > start + cpuinfo.dcache_size)
43 end = start + cpuinfo.dcache_size; 27 end = start + cpuinfo.dcache_size;
44 28
@@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page)
112 96
113void flush_cache_all(void) 97void flush_cache_all(void)
114{ 98{
115 __flush_dcache_all(0, cpuinfo.dcache_size); 99 __flush_dcache(0, cpuinfo.dcache_size);
116 __flush_icache(0, cpuinfo.icache_size); 100 __flush_icache(0, cpuinfo.icache_size);
117} 101}
118 102
@@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
182 */ 166 */
183 unsigned long start = (unsigned long)page_address(page); 167 unsigned long start = (unsigned long)page_address(page);
184 168
185 __flush_dcache_all(start, start + PAGE_SIZE); 169 __flush_dcache(start, start + PAGE_SIZE);
186} 170}
187 171
188void flush_dcache_page(struct page *page) 172void flush_dcache_page(struct page *page)
@@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
268{ 252{
269 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 253 flush_cache_page(vma, user_vaddr, page_to_pfn(page));
270 memcpy(dst, src, len); 254 memcpy(dst, src, len);
271 __flush_dcache_all((unsigned long)src, (unsigned long)src + len); 255 __flush_dcache((unsigned long)src, (unsigned long)src + len);
272 if (vma->vm_flags & VM_EXEC) 256 if (vma->vm_flags & VM_EXEC)
273 __flush_icache((unsigned long)src, (unsigned long)src + len); 257 __flush_icache((unsigned long)src, (unsigned long)src + len);
274} 258}
@@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
279{ 263{
280 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 264 flush_cache_page(vma, user_vaddr, page_to_pfn(page));
281 memcpy(dst, src, len); 265 memcpy(dst, src, len);
282 __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); 266 __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
283 if (vma->vm_flags & VM_EXEC) 267 if (vma->vm_flags & VM_EXEC)
284 __flush_icache((unsigned long)dst, (unsigned long)dst + len); 268 __flush_icache((unsigned long)dst, (unsigned long)dst + len);
285} 269}
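The consolidated __flush_dcache() keeps the two properties the deleted helper had: round the span out to whole cache lines, then clamp it, since flushing a range larger than the cache itself is wasted work. As a sketch, with line standing for cpuinfo.dcache_line_size:

	start &= ~(line - 1);				/* round down to a line */
	end = (end + line - 1) & ~(line - 1);		/* round up to a line */
	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;	/* clamp to cache size */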
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c36546959e86..729f89163bc3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB 108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
109 default 2 109 default 2
110 110
111config SYS_SUPPORTS_HUGETLBFS
112 def_bool y if PA20
113
111source "init/Kconfig" 114source "init/Kconfig"
112 115
113source "kernel/Kconfig.freezer" 116source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 000000000000..7d56a9ccb752
--- /dev/null
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,85 @@
1#ifndef _ASM_PARISC64_HUGETLB_H
2#define _ASM_PARISC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
14static inline int is_hugepage_only_range(struct mm_struct *mm,
15 unsigned long addr,
16 unsigned long len) {
17 return 0;
18}
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
34static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
35 unsigned long addr, unsigned long end,
36 unsigned long floor,
37 unsigned long ceiling)
38{
39 free_pgd_range(tlb, addr, end, floor, ceiling);
40}
41
42static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
43 unsigned long addr, pte_t *ptep)
44{
45}
46
47static inline int huge_pte_none(pte_t pte)
48{
49 return pte_none(pte);
50}
51
52static inline pte_t huge_pte_wrprotect(pte_t pte)
53{
54 return pte_wrprotect(pte);
55}
56
57static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
58 unsigned long addr, pte_t *ptep)
59{
60 pte_t old_pte = *ptep;
61 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
62}
63
64static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 unsigned long addr, pte_t *ptep,
66 pte_t pte, int dirty)
67{
68 int changed = !pte_same(*ptep, pte);
69 if (changed) {
70 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
71 flush_tlb_page(vma, addr);
72 }
73 return changed;
74}
75
76static inline pte_t huge_ptep_get(pte_t *ptep)
77{
78 return *ptep;
79}
80
81static inline void arch_clear_hugepage_flags(struct page *page)
82{
83}
84
85#endif /* _ASM_PARISC64_HUGETLB_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 60d5d174dfe4..80e742a1c162 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,11 +145,22 @@ extern int npmem_ranges;
145#endif /* CONFIG_DISCONTIGMEM */ 145#endif /* CONFIG_DISCONTIGMEM */
146 146
147#ifdef CONFIG_HUGETLB_PAGE 147#ifdef CONFIG_HUGETLB_PAGE
148#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ 148#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
149#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 149#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
150#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 150#define HPAGE_MASK (~(HPAGE_SIZE - 1))
151#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 151#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
152
153#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
154# define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */
155# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
156#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
157# define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */
158# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
159#else
160# define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */
161# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
152#endif 162#endif
163#endif /* CONFIG_HUGETLB_PAGE */
153 164
154#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 165#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
155 166
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3edbb9fc91b4..f2fd327dce2e 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
35 PxD_FLAG_VALID | 35 PxD_FLAG_VALID |
36 PxD_FLAG_ATTACHED) 36 PxD_FLAG_ATTACHED)
37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); 37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
38 /* The first pmd entry also is marked with _PAGE_GATEWAY as 38 /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
39 * a signal that this pmd may not be freed */ 39 * a signal that this pmd may not be freed */
40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); 40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
41#endif 41#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f93c4a4e6580..d8534f95915a 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) 83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
84 84
85/* This is the size of the initially mapped kernel memory */ 85/* This is the size of the initially mapped kernel memory */
86#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ 86#ifdef CONFIG_64BIT
87#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
88#else
89#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
90#endif
87#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) 91#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
88 92
89#if CONFIG_PGTABLE_LEVELS == 3 93#if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
167#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ 171#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
168#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ 172#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
169#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ 173#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
170/* bit 21 was formerly the FLUSH bit but is now unused */ 174#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
171#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ 175#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
172 176
173/* N.B. The bits are defined in terms of a 32 bit word above, so the */ 177/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
194#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) 198#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
195#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) 199#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
196#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) 200#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
201#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
197#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 202#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
198 203
199#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) 204#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
217#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) 222#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
218#define PxD_FLAG_MASK (0xf) 223#define PxD_FLAG_MASK (0xf)
219#define PxD_FLAG_SHIFT (4) 224#define PxD_FLAG_SHIFT (4)
220#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ 225#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
221 226
222#ifndef __ASSEMBLY__ 227#ifndef __ASSEMBLY__
223 228
@@ -363,6 +368,18 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
363static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 368static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
364 369
365/* 370/*
371 * Huge pte definitions.
372 */
373#ifdef CONFIG_HUGETLB_PAGE
374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
375#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
376#else
377#define pte_huge(pte) (0)
378#define pte_mkhuge(pte) (pte)
379#endif
380
381
382/*
366 * Conversion functions: convert a page and protection to a page entry, 383 * Conversion functions: convert a page and protection to a page entry,
367 * and a page entry and page directory to the page they refer to. 384 * and a page entry and page directory to the page they refer to.
368 */ 385 */
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
410/* Find an entry in the second-level page table.. */ 427/* Find an entry in the second-level page table.. */
411 428
412#if CONFIG_PGTABLE_LEVELS == 3 429#if CONFIG_PGTABLE_LEVELS == 3
430#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
413#define pmd_offset(dir,address) \ 431#define pmd_offset(dir,address) \
414((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) 432((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
415#else 433#else
416#define pmd_offset(dir,addr) ((pmd_t *) dir) 434#define pmd_offset(dir,addr) ((pmd_t *) dir)
417#endif 435#endif
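
The pte_huge()/pte_mkhuge() helpers added above are simple bit tests; a userspace model (the concrete bit value is assumed here, since the kernel computes it via xlate_pabit()):

#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x)   ((pte_t) { (x) })

/* Stand-in for _PAGE_HUGE; the real bit comes from xlate_pabit(21). */
#define _PAGE_HUGE (1UL << 10)

#define pte_huge(pte)   (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))

int main(void)
{
	pte_t pte = __pte(0x1000);

	pte = pte_mkhuge(pte);
	printf("huge? %s\n", pte_huge(pte) ? "yes" : "no");
	return 0;
}
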
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 54adb60c0a42..7e759ecb1343 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
192 */ 192 */
193typedef unsigned int elf_caddr_t; 193typedef unsigned int elf_caddr_t;
194 194
195#define start_thread_som(regs, new_pc, new_sp) do { \
196 unsigned long *sp = (unsigned long *)new_sp; \
197 __u32 spaceid = (__u32)current->mm->context; \
198 unsigned long pc = (unsigned long)new_pc; \
199 /* offset pc for priv. level */ \
200 pc |= 3; \
201 \
202 regs->iasq[0] = spaceid; \
203 regs->iasq[1] = spaceid; \
204 regs->iaoq[0] = pc; \
205 regs->iaoq[1] = pc + 4; \
206 regs->sr[2] = LINUX_GATEWAY_SPACE; \
207 regs->sr[3] = 0xffff; \
208 regs->sr[4] = spaceid; \
209 regs->sr[5] = spaceid; \
210 regs->sr[6] = spaceid; \
211 regs->sr[7] = spaceid; \
212 regs->gr[ 0] = USER_PSW; \
213 regs->gr[30] = ((new_sp)+63)&~63; \
214 regs->gr[31] = pc; \
215 \
216 get_user(regs->gr[26],&sp[0]); \
217 get_user(regs->gr[25],&sp[-1]); \
218 get_user(regs->gr[24],&sp[-2]); \
219 get_user(regs->gr[23],&sp[-3]); \
220} while(0)
221
222/* The ELF abi wants things done a "wee bit" differently than 195/* The ELF abi wants things done a "wee bit" differently than
223 * som does. Supporting this behavior here avoids 196 * som does. Supporting this behavior here avoids
224 * having our own version of create_elf_tables. 197 * having our own version of create_elf_tables.
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ecc3ae1ca28e..dd4d1876a020 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -49,16 +49,6 @@
49#define MADV_DONTFORK 10 /* don't inherit across fork */ 49#define MADV_DONTFORK 10 /* don't inherit across fork */
50#define MADV_DOFORK 11 /* do inherit across fork */ 50#define MADV_DOFORK 11 /* do inherit across fork */
51 51
52/* The range 12-64 is reserved for page size specification. */
53#define MADV_4K_PAGES 12 /* Use 4K pages */
54#define MADV_16K_PAGES 14 /* Use 16K pages */
55#define MADV_64K_PAGES 16 /* Use 64K pages */
56#define MADV_256K_PAGES 18 /* Use 256K pages */
57#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
58#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
59#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
60#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
61
62#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ 52#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
63#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ 53#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
64 54
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 59001cea13f9..d2f62570a7b1 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,6 +290,14 @@ int main(void)
290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); 290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
291 DEFINE(ASM_PT_INITIAL, PT_INITIAL); 291 DEFINE(ASM_PT_INITIAL, PT_INITIAL);
292 BLANK(); 292 BLANK();
293 /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
294 * and kernel data on physical huge pages */
295#ifdef CONFIG_HUGETLB_PAGE
296 DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
297#else
298 DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
299#endif
300 BLANK();
293 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
294 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 302 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
295 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); 303 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c5ef4081b01d..623496c11756 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,21 +502,38 @@
502 STREG \pte,0(\ptp) 502 STREG \pte,0(\ptp)
503 .endm 503 .endm
504 504
505 /* We have (depending on the page size):
506 * - 38 to 52-bit Physical Page Number
507 * - 12 to 26-bit page offset
508 */
505 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
506 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ 510 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
507 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 511 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
512 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
508 513
509 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 514 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
510 .macro convert_for_tlb_insert20 pte 515 .macro convert_for_tlb_insert20 pte,tmp
516#ifdef CONFIG_HUGETLB_PAGE
517 copy \pte,\tmp
518 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
519 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
520
521 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
522 (63-58)+PAGE_ADD_SHIFT,\pte
523 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
524 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
525 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
526#else /* Huge pages disabled */
511 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 527 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
512 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 528 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
513 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 529 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
514 (63-58)+PAGE_ADD_SHIFT,\pte 530 (63-58)+PAGE_ADD_SHIFT,\pte
531#endif
515 .endm 532 .endm
516 533
517 /* Convert the pte and prot to tlb insertion values. How 534 /* Convert the pte and prot to tlb insertion values. How
518 * this happens is quite subtle, read below */ 535 * this happens is quite subtle, read below */
519 .macro make_insert_tlb spc,pte,prot 536 .macro make_insert_tlb spc,pte,prot,tmp
520 space_to_prot \spc \prot /* create prot id from space */ 537 space_to_prot \spc \prot /* create prot id from space */
521 /* The following is the real subtlety. This is depositing 538 /* The following is the real subtlety. This is depositing
522 * T <-> _PAGE_REFTRAP 539 * T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
553 depdi 1,12,1,\prot 570 depdi 1,12,1,\prot
554 571
555 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 572 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
556 convert_for_tlb_insert20 \pte 573 convert_for_tlb_insert20 \pte \tmp
557 .endm 574 .endm
558 575
559 /* Identical macro to make_insert_tlb above, except it 576 /* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
646 663
647 664
648 /* 665 /*
 649 * Align fault_vector_20 on 4K boundary so that both 666 * Fault vectors are architecturally required to be aligned on a 2K
 650 * fault_vector_11 and fault_vector_20 are on the 667 * boundary
651 * same page. This is only necessary as long as we
652 * write protect the kernel text, which we may stop
653 * doing once we use large page translations to cover
654 * the static part of the kernel address space.
655 */ 668 */
656 669
657 .text 670 .text
658 671 .align 2048
659 .align 4096
660 672
661ENTRY(fault_vector_20) 673ENTRY(fault_vector_20)
662 /* First vector is invalid (0) */ 674 /* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
1147 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w 1159 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1148 update_accessed ptp,pte,t0,t1 1160 update_accessed ptp,pte,t0,t1
1149 1161
1150 make_insert_tlb spc,pte,prot 1162 make_insert_tlb spc,pte,prot,t1
1151 1163
1152 idtlbt pte,prot 1164 idtlbt pte,prot
1153 1165
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
1173 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w 1185 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1174 update_accessed ptp,pte,t0,t1 1186 update_accessed ptp,pte,t0,t1
1175 1187
1176 make_insert_tlb spc,pte,prot 1188 make_insert_tlb spc,pte,prot,t1
1177 1189
1178 idtlbt pte,prot 1190 idtlbt pte,prot
1179 1191
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
1267 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 1279 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1268 update_accessed ptp,pte,t0,t1 1280 update_accessed ptp,pte,t0,t1
1269 1281
1270 make_insert_tlb spc,pte,prot 1282 make_insert_tlb spc,pte,prot,t1
1271 1283
1272 f_extend pte,t1 1284 f_extend pte,t1
1273 1285
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
1295 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 1307 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1296 update_accessed ptp,pte,t0,t1 1308 update_accessed ptp,pte,t0,t1
1297 1309
1298 make_insert_tlb spc,pte,prot 1310 make_insert_tlb spc,pte,prot,t1
1299 1311
1300 f_extend pte,t1 1312 f_extend pte,t1
1301 1313
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
1404 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1416 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1405 update_accessed ptp,pte,t0,t1 1417 update_accessed ptp,pte,t0,t1
1406 1418
1407 make_insert_tlb spc,pte,prot 1419 make_insert_tlb spc,pte,prot,t1
1408 1420
1409 iitlbt pte,prot 1421 iitlbt pte,prot
1410 1422
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
1428 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w 1440 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1429 update_accessed ptp,pte,t0,t1 1441 update_accessed ptp,pte,t0,t1
1430 1442
1431 make_insert_tlb spc,pte,prot 1443 make_insert_tlb spc,pte,prot,t1
1432 1444
1433 iitlbt pte,prot 1445 iitlbt pte,prot
1434 1446
@@ -1514,7 +1526,7 @@ itlb_miss_20:
1514 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1526 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1515 update_accessed ptp,pte,t0,t1 1527 update_accessed ptp,pte,t0,t1
1516 1528
1517 make_insert_tlb spc,pte,prot 1529 make_insert_tlb spc,pte,prot,t1
1518 1530
1519 f_extend pte,t1 1531 f_extend pte,t1
1520 1532
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
1534 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 1546 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1535 update_accessed ptp,pte,t0,t1 1547 update_accessed ptp,pte,t0,t1
1536 1548
1537 make_insert_tlb spc,pte,prot 1549 make_insert_tlb spc,pte,prot,t1
1538 1550
1539 f_extend pte,t1 1551 f_extend pte,t1
1540 1552
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
1566 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1578 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1567 update_dirty ptp,pte,t1 1579 update_dirty ptp,pte,t1
1568 1580
1569 make_insert_tlb spc,pte,prot 1581 make_insert_tlb spc,pte,prot,t1
1570 1582
1571 idtlbt pte,prot 1583 idtlbt pte,prot
1572 1584
@@ -1610,7 +1622,7 @@ dbit_trap_20:
1610 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1622 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1611 update_dirty ptp,pte,t1 1623 update_dirty ptp,pte,t1
1612 1624
1613 make_insert_tlb spc,pte,prot 1625 make_insert_tlb spc,pte,prot,t1
1614 1626
1615 f_extend pte,t1 1627 f_extend pte,t1
1616 1628
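
In rough C terms, convert_for_tlb_insert20 now keeps the physical frame number, then deposits a page-size encoding in the low bits, picking the huge encoding only when the pte's huge bit is set. A loose model (every shift value and encoding below is an illustrative placeholder, not the real PA 2.0 layout):

#include <stdio.h>
#include <stdint.h>

#define PFN_SHIFT    12            /* bits dropped as prot/software bits */
#define ENC_DEFAULT  0x0           /* base page-size encoding (assumed) */
#define ENC_HUGE     0x9           /* huge page-size encoding (assumed) */
#define PTE_HUGE_BIT (1ULL << 10)  /* stand-in for _PAGE_HUGE */

static uint64_t for_tlb_insert(uint64_t pte)
{
	uint64_t frame = pte & ~((1ULL << PFN_SHIFT) - 1);
	uint64_t enc = (pte & PTE_HUGE_BIT) ? ENC_HUGE : ENC_DEFAULT;

	/* what the extrd,u / depdi pair achieves in the macro above */
	return frame | enc;
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)for_tlb_insert(0x40000000ULL | PTE_HUGE_BIT));
	return 0;
}
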
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e7d64527aff9..75aa0db9f69e 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -69,7 +69,7 @@ $bss_loop:
69 stw,ma %arg2,4(%r1) 69 stw,ma %arg2,4(%r1)
70 stw,ma %arg3,4(%r1) 70 stw,ma %arg3,4(%r1)
71 71
72 /* Initialize startup VM. Just map first 8/16 MB of memory */ 72 /* Initialize startup VM. Just map first 16/32 MB of memory */
73 load32 PA(swapper_pg_dir),%r4 73 load32 PA(swapper_pg_dir),%r4
74 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 74 mtctl %r4,%cr24 /* Initialize kernel root pointer */
75 mtctl %r4,%cr25 /* Initialize user root pointer */ 75 mtctl %r4,%cr25 /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
107 /* Now initialize the PTEs themselves. We use RWX for 107 /* Now initialize the PTEs themselves. We use RWX for
108 * everything ... it will get remapped correctly later */ 108 * everything ... it will get remapped correctly later */
109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ 109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
110 ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ 110 load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
111 load32 PA(pg0),%r1 111 load32 PA(pg0),%r1
112 112
113$pgt_fill_loop: 113$pgt_fill_loop:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 72a3c658ad7b..f7ea626e29c9 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
130 printk(KERN_INFO "The 32-bit Kernel has started...\n"); 130 printk(KERN_INFO "The 32-bit Kernel has started...\n");
131#endif 131#endif
132 132
133 printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024)); 133 printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
134 (int)(PAGE_SIZE / 1024));
135#ifdef CONFIG_HUGETLB_PAGE
136 printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
137 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
138#else
139 printk(KERN_CONT "disabled");
140#endif
141 printk(KERN_CONT ".\n");
142
134 143
135 pdc_console_init(); 144 pdc_console_init();
136 145
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
377void start_parisc(void) 386void start_parisc(void)
378{ 387{
379 extern void start_kernel(void); 388 extern void start_kernel(void);
389 extern void early_trap_init(void);
380 390
381 int ret, cpunum; 391 int ret, cpunum;
382 struct pdc_coproc_cfg coproc_cfg; 392 struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
397 panic("must have an fpu to boot linux"); 407 panic("must have an fpu to boot linux");
398 } 408 }
399 409
410 early_trap_init(); /* initialize checksum of fault_vector */
411
400 start_kernel(); 412 start_kernel();
401 // not reached 413 // not reached
402} 414}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0b8d26d3ba43..3fbd7252a4b2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -369,7 +369,7 @@ tracesys_exit:
369 ldo -16(%r30),%r29 /* Reference param save area */ 369 ldo -16(%r30),%r29 /* Reference param save area */
370#endif 370#endif
371 ldo TASK_REGS(%r1),%r26 371 ldo TASK_REGS(%r1),%r26
372 bl do_syscall_trace_exit,%r2 372 BL do_syscall_trace_exit,%r2
373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
375 LDREG TI_TASK(%r1), %r1 375 LDREG TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
390#ifdef CONFIG_64BIT 390#ifdef CONFIG_64BIT
391 ldo -16(%r30),%r29 /* Reference param save area */ 391 ldo -16(%r30),%r29 /* Reference param save area */
392#endif 392#endif
393 bl do_syscall_trace_exit,%r2 393 BL do_syscall_trace_exit,%r2
394 ldo TASK_REGS(%r1),%r26 394 ldo TASK_REGS(%r1),%r26
395 395
396 ldil L%syscall_exit_rfi,%r1 396 ldil L%syscall_exit_rfi,%r1
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b99b39f1da02..553b09855cfd 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
807} 807}
808 808
809 809
810int __init check_ivt(void *iva) 810void __init initialize_ivt(const void *iva)
811{ 811{
812 extern u32 os_hpmc_size; 812 extern u32 os_hpmc_size;
813 extern const u32 os_hpmc[]; 813 extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
818 u32 *hpmcp; 818 u32 *hpmcp;
819 u32 length; 819 u32 length;
820 820
821 if (strcmp((char *)iva, "cows can fly")) 821 if (strcmp((const char *)iva, "cows can fly"))
822 return -1; 822 panic("IVT invalid");
823 823
824 ivap = (u32 *)iva; 824 ivap = (u32 *)iva;
825 825
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
839 check += ivap[i]; 839 check += ivap[i];
840 840
841 ivap[5] = -check; 841 ivap[5] = -check;
842
843 return 0;
844} 842}
845 843
846#ifndef CONFIG_64BIT
847extern const void fault_vector_11;
848#endif
849extern const void fault_vector_20;
850 844
851void __init trap_init(void) 845/* early_trap_init() is called before we set up kernel mappings and
846 * write-protect the kernel */
847void __init early_trap_init(void)
852{ 848{
853 void *iva; 849 extern const void fault_vector_20;
854 850
855 if (boot_cpu_data.cpu_type >= pcxu) 851#ifndef CONFIG_64BIT
856 iva = (void *) &fault_vector_20; 852 extern const void fault_vector_11;
857 else 853 initialize_ivt(&fault_vector_11);
858#ifdef CONFIG_64BIT
859 panic("Can't boot 64-bit OS on PA1.1 processor!");
860#else
861 iva = (void *) &fault_vector_11;
862#endif 854#endif
863 855
864 if (check_ivt(iva)) 856 initialize_ivt(&fault_vector_20);
865 panic("IVT invalid"); 857}
858
859void __init trap_init(void)
860{
866} 861}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 0dacc5ca555a..308f29081d46 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
60 EXIT_DATA 60 EXIT_DATA
61 } 61 }
62 PERCPU_SECTION(8) 62 PERCPU_SECTION(8)
63 . = ALIGN(PAGE_SIZE); 63 . = ALIGN(HUGEPAGE_SIZE);
64 __init_end = .; 64 __init_end = .;
65 /* freed after init ends here */ 65 /* freed after init ends here */
66 66
@@ -116,7 +116,7 @@ SECTIONS
116 * that we can properly leave these 116 * that we can properly leave these
117 * as writable 117 * as writable
118 */ 118 */
119 . = ALIGN(PAGE_SIZE); 119 . = ALIGN(HUGEPAGE_SIZE);
120 data_start = .; 120 data_start = .;
121 121
122 EXCEPTION_TABLE(8) 122 EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
135 _edata = .; 135 _edata = .;
136 136
137 /* BSS */ 137 /* BSS */
138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) 138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
139
140 /* bootmap is allocated in setup_bootmem() directly behind bss. */
139 141
142 . = ALIGN(HUGEPAGE_SIZE);
140 _end = . ; 143 _end = . ;
141 144
142 STABS_DEBUG 145 STABS_DEBUG
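
ALIGN(HUGEPAGE_SIZE) rounds the location counter up to the next huge-page boundary so that kernel text and data start on addresses a huge TLB entry can cover. The same arithmetic in C (4 MB huge pages assumed):

#include <stdio.h>

static unsigned long align_up(unsigned long addr, unsigned long size)
{
	return (addr + size - 1) & ~(size - 1);
}

int main(void)
{
	unsigned long hugepage = 1UL << 22;              /* 4 MB, assumed */

	printf("0x%lx\n", align_up(0x123456, hugepage)); /* 0x400000 */
	return 0;
}
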
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
index 758ceefb373a..134393de69d2 100644
--- a/arch/parisc/mm/Makefile
+++ b/arch/parisc/mm/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-y := init.o fault.o ioremap.o 5obj-y := init.o fault.o ioremap.o
6obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f6fdc77a72bd
--- /dev/null
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,161 @@
1/*
2 * PARISC64 Huge TLB page support.
3 *
4 * This parisc implementation is heavily based on the SPARC and x86 code.
5 *
6 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
7 */
8
9#include <linux/fs.h>
10#include <linux/mm.h>
11#include <linux/hugetlb.h>
12#include <linux/pagemap.h>
13#include <linux/sysctl.h>
14
15#include <asm/mman.h>
16#include <asm/pgalloc.h>
17#include <asm/tlb.h>
18#include <asm/tlbflush.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22
23unsigned long
24hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25 unsigned long len, unsigned long pgoff, unsigned long flags)
26{
27 struct hstate *h = hstate_file(file);
28
29 if (len & ~huge_page_mask(h))
30 return -EINVAL;
31 if (len > TASK_SIZE)
32 return -ENOMEM;
33
34 if (flags & MAP_FIXED)
35 if (prepare_hugepage_range(file, addr, len))
36 return -EINVAL;
37
38 if (addr)
39 addr = ALIGN(addr, huge_page_size(h));
40
41 /* we need to make sure the colouring is OK */
42 return arch_get_unmapped_area(file, addr, len, pgoff, flags);
43}
44
45
46pte_t *huge_pte_alloc(struct mm_struct *mm,
47 unsigned long addr, unsigned long sz)
48{
49 pgd_t *pgd;
50 pud_t *pud;
51 pmd_t *pmd;
52 pte_t *pte = NULL;
53
54 /* We must align the address, because our caller will run
55 * set_huge_pte_at() on whatever we return, which writes out
56 * all of the sub-ptes for the hugepage range. So we have
57 * to give it the first such sub-pte.
58 */
59 addr &= HPAGE_MASK;
60
61 pgd = pgd_offset(mm, addr);
62 pud = pud_alloc(mm, pgd, addr);
63 if (pud) {
64 pmd = pmd_alloc(mm, pud, addr);
65 if (pmd)
66 pte = pte_alloc_map(mm, NULL, pmd, addr);
67 }
68 return pte;
69}
70
71pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
72{
73 pgd_t *pgd;
74 pud_t *pud;
75 pmd_t *pmd;
76 pte_t *pte = NULL;
77
78 addr &= HPAGE_MASK;
79
80 pgd = pgd_offset(mm, addr);
81 if (!pgd_none(*pgd)) {
82 pud = pud_offset(pgd, addr);
83 if (!pud_none(*pud)) {
84 pmd = pmd_offset(pud, addr);
85 if (!pmd_none(*pmd))
86 pte = pte_offset_map(pmd, addr);
87 }
88 }
89 return pte;
90}
91
92/* Purge data and instruction TLB entries. Must be called holding
93 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
94 * machines since the purge must be broadcast to all CPUs.
95 */
96static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
97{
98 int i;
99
100 /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
101 * Linux standard huge pages (e.g. 2 MB) */
102 BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
103
104 addr &= HPAGE_MASK;
105 addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
106
107 for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
108 mtsp(mm->context, 1);
109 pdtlb(addr);
110 if (unlikely(split_tlb))
111 pitlb(addr);
112 addr += (1UL << REAL_HPAGE_SHIFT);
113 }
114}
115
116void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
117 pte_t *ptep, pte_t entry)
118{
119 unsigned long addr_start;
120 int i;
121
122 addr &= HPAGE_MASK;
123 addr_start = addr;
124
125 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
126 /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
127 * instead, but then we get double locking on pa_tlb_lock. */
128 *ptep = entry;
129 ptep++;
130
131 /* Drop the PAGE_SIZE/non-huge tlb entry */
132 purge_tlb_entries(mm, addr);
133
134 addr += PAGE_SIZE;
135 pte_val(entry) += PAGE_SIZE;
136 }
137
138 purge_tlb_entries_huge(mm, addr_start);
139}
140
141
142pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
143 pte_t *ptep)
144{
145 pte_t entry;
146
147 entry = *ptep;
148 set_huge_pte_at(mm, addr, ptep, __pte(0));
149
150 return entry;
151}
152
153int pmd_huge(pmd_t pmd)
154{
155 return 0;
156}
157
158int pud_huge(pud_t pud)
159{
160 return 0;
161}
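
A userspace model of the sub-pte fill loop in set_huge_pte_at() above (4K base pages and 2 MB huge pages assumed): every 4K slice of the huge page gets its own pte, advanced by PAGE_SIZE each step:

#include <stdio.h>

#define PAGE_SHIFT         12
#define PAGE_SIZE          (1UL << PAGE_SHIFT)
#define HPAGE_SHIFT        21                         /* assumed */
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

int main(void)
{
	unsigned long ptes[1 << HUGETLB_PAGE_ORDER];
	unsigned long entry = 0x40000000UL;  /* assumed physical base */
	int i;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		ptes[i] = entry;       /* one sub-pte per 4K slice */
		entry += PAGE_SIZE;
	}
	printf("filled %d ptes, last maps 0x%lx\n",
	       1 << HUGETLB_PAGE_ORDER,
	       ptes[(1 << HUGETLB_PAGE_ORDER) - 1]);
	return 0;
}
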
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index c5fec4890fdf..1b366c477687 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
409 unsigned long vaddr; 409 unsigned long vaddr;
410 unsigned long ro_start; 410 unsigned long ro_start;
411 unsigned long ro_end; 411 unsigned long ro_end;
412 unsigned long fv_addr; 412 unsigned long kernel_end;
413 unsigned long gw_addr;
414 extern const unsigned long fault_vector_20;
415 extern void * const linux_gateway_page;
416 413
417 ro_start = __pa((unsigned long)_text); 414 ro_start = __pa((unsigned long)_text);
418 ro_end = __pa((unsigned long)&data_start); 415 ro_end = __pa((unsigned long)&data_start);
419 fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; 416 kernel_end = __pa((unsigned long)&_end);
420 gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
421 417
422 end_paddr = start_paddr + size; 418 end_paddr = start_paddr + size;
423 419
@@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
475 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { 471 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
476 pte_t pte; 472 pte_t pte;
477 473
478 /*
479 * Map the fault vector writable so we can
480 * write the HPMC checksum.
481 */
482 if (force) 474 if (force)
483 pte = __mk_pte(address, pgprot); 475 pte = __mk_pte(address, pgprot);
484 else if (parisc_text_address(vaddr) && 476 else if (parisc_text_address(vaddr)) {
485 address != fv_addr)
486 pte = __mk_pte(address, PAGE_KERNEL_EXEC); 477 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
478 if (address >= ro_start && address < kernel_end)
479 pte = pte_mkhuge(pte);
480 }
487 else 481 else
488#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) 482#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
489 if (address >= ro_start && address < ro_end 483 if (address >= ro_start && address < ro_end) {
490 && address != fv_addr 484 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
491 && address != gw_addr) 485 pte = pte_mkhuge(pte);
492 pte = __mk_pte(address, PAGE_KERNEL_RO); 486 } else
493 else
494#endif 487#endif
488 {
495 pte = __mk_pte(address, pgprot); 489 pte = __mk_pte(address, pgprot);
490 if (address >= ro_start && address < kernel_end)
491 pte = pte_mkhuge(pte);
492 }
496 493
497 if (address >= end_paddr) { 494 if (address >= end_paddr) {
498 if (force) 495 if (force)
@@ -536,15 +533,12 @@ void free_initmem(void)
536 533
537 /* force the kernel to see the new TLB entries */ 534 /* force the kernel to see the new TLB entries */
538 __flush_tlb_range(0, init_begin, init_end); 535 __flush_tlb_range(0, init_begin, init_end);
539 /* Attempt to catch anyone trying to execute code here 536
540 * by filling the page with BRK insns.
541 */
542 memset((void *)init_begin, 0x00, init_end - init_begin);
543 /* finally dump all the instructions which were cached, since the 537 /* finally dump all the instructions which were cached, since the
 544 * pages are no longer executable */ 538 * pages are no longer executable */
545 flush_icache_range(init_begin, init_end); 539 flush_icache_range(init_begin, init_end);
546 540
547 free_initmem_default(-1); 541 free_initmem_default(POISON_FREE_INITMEM);
548 542
 549 /* set up a new led state on systems shipped with an LED state panel */ 543 /* set up a new led state on systems shipped with an LED state panel */
550 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 544 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
728 unsigned long size; 722 unsigned long size;
729 723
730 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; 724 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
731 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
732 size = pmem_ranges[range].pages << PAGE_SHIFT; 725 size = pmem_ranges[range].pages << PAGE_SHIFT;
726 end_paddr = start_paddr + size;
733 727
734 map_pages((unsigned long)__va(start_paddr), start_paddr, 728 map_pages((unsigned long)__va(start_paddr), start_paddr,
735 size, PAGE_KERNEL, 0); 729 size, PAGE_KERNEL, 0);
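
The reworked map_pages() logic reduces to two decisions per address: text gets an executable pte, and anything inside the kernel image range additionally becomes a huge pte. A condensed sketch (addresses and types illustrative):

#include <stdio.h>

enum prot { KERNEL, KERNEL_EXEC };

struct pte { enum prot prot; int huge; };

static struct pte mk_pte(unsigned long addr, int is_text,
			 unsigned long ro_start, unsigned long kernel_end)
{
	struct pte pte = { KERNEL, 0 };

	if (is_text)
		pte.prot = KERNEL_EXEC;
	if (addr >= ro_start && addr < kernel_end)
		pte.huge = 1;           /* pte_mkhuge() in the real code */
	return pte;
}

int main(void)
{
	struct pte p = mk_pte(0x100000, 1, 0x100000, 0x2000000);

	printf("exec=%d huge=%d\n", p.prot == KERNEL_EXEC, p.huge);
	return 0;
}
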
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a908ada8e0a5..2220f7a60def 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -108,6 +108,7 @@
108#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ 108#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
109#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ 109#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
110#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ 110#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
111#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
111#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) 112#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
112#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) 113#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
113 114
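
MSR_TM_RESV() flags the one transaction-state combination the architecture reserves: both TS bits set. A userspace model (bit positions per the MSR_TS_S/MSR_TS_T definitions; needs a 64-bit long):

#include <stdio.h>

#define __MASK(n)   (1UL << (n))
#define MSR_TS_S    __MASK(33)  /* Transaction Suspended */
#define MSR_TS_T    __MASK(34)  /* Transaction Transactional */
#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S)

#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK)

int main(void)
{
	printf("suspended only: resv=%d\n", MSR_TM_RESV(MSR_TS_S));
	printf("both TS bits:   resv=%d\n", MSR_TM_RESV(MSR_TS_T | MSR_TS_S));
	return 0;
}
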
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c9e26cb264f4..f2b0b1b0c72a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -382,3 +382,4 @@ COMPAT_SYS(shmat)
382SYSCALL(shmdt) 382SYSCALL(shmdt)
383SYSCALL(shmget) 383SYSCALL(shmget)
384COMPAT_SYS(shmctl) 384COMPAT_SYS(shmctl)
385SYSCALL(mlock2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d8f8023ac27..4b6b8ace18e0 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 378 15#define __NR_syscalls 379
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 81579e93c659..1effea5193d6 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -400,5 +400,6 @@
400#define __NR_shmdt 375 400#define __NR_shmdt 375
401#define __NR_shmget 376 401#define __NR_shmget 376
402#define __NR_shmctl 377 402#define __NR_shmctl 377
403#define __NR_mlock2 378
403 404
404#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 405#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
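
With the syscall wired up, userspace can reach it via syscall(2); a sketch (378 is the powerpc number added above; the MLOCK_ONFAULT value is an assumption here):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_mlock2
#define __NR_mlock2 378      /* powerpc; other architectures differ */
#endif
#define MLOCK_ONFAULT 1      /* assumed: lock pages as they fault in */

int main(void)
{
	size_t len = 4096;
	void *buf = malloc(len);

	if (syscall(__NR_mlock2, buf, len, MLOCK_ONFAULT) != 0)
		perror("mlock2");
	else
		puts("locked on fault");
	free(buf);
	return 0;
}
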
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75b6676c1a0b..646bf4d222c1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
551 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; 551 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
552 } 552 }
553 553
554 /*
555 * Use the current MSR TM suspended bit to track if we have
556 * checkpointed state outstanding.
557 * On signal delivery, we'd normally reclaim the checkpointed
 558 * state to obtain the stack pointer (see: get_tm_stackpointer()).
559 * This will then directly return to userspace without going
560 * through __switch_to(). However, if the stack frame is bad,
561 * we need to exit this thread which calls __switch_to() which
562 * will again attempt to reclaim the already saved tm state.
563 * Hence we need to check that we've not already reclaimed
564 * this state.
 565 * We do this using the current MSR, rather than tracking it in
 566 * some specific thread_struct bit, as it has the additional
 567 * benefit of checking for a potential TM bad thing exception.
568 */
569 if (!MSR_TM_SUSPENDED(mfmsr()))
570 return;
571
554 tm_reclaim(thr, thr->regs->msr, cause); 572 tm_reclaim(thr, thr->regs->msr, cause);
555 573
556 /* Having done the reclaim, we now have the checkpointed 574 /* Having done the reclaim, we now have the checkpointed
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 0dbee465af7a..ef7c24e84a62 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
875 return 1; 875 return 1;
876#endif /* CONFIG_SPE */ 876#endif /* CONFIG_SPE */
877 877
878 /* Get the top half of the MSR from the user context */
879 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
880 return 1;
881 msr_hi <<= 32;
882 /* If TM bits are set to the reserved value, it's an invalid context */
883 if (MSR_TM_RESV(msr_hi))
884 return 1;
885 /* Pull in the MSR TM bits from the user context */
886 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
878 /* Now, recheckpoint. This loads up all of the checkpointed (older) 887 /* Now, recheckpoint. This loads up all of the checkpointed (older)
879 * registers, including FP and V[S]Rs. After recheckpointing, the 888 * registers, including FP and V[S]Rs. After recheckpointing, the
880 * transactional versions should be loaded. 889 * transactional versions should be loaded.
@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
884 current->thread.tm_texasr |= TEXASR_FS; 893 current->thread.tm_texasr |= TEXASR_FS;
885 /* This loads the checkpointed FP/VEC state, if used */ 894 /* This loads the checkpointed FP/VEC state, if used */
886 tm_recheckpoint(&current->thread, msr); 895 tm_recheckpoint(&current->thread, msr);
887 /* Get the top half of the MSR */
888 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
889 return 1;
890 /* Pull in MSR TM from user context */
891 regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
892 896
893 /* This loads the speculative FP/VEC state, if used */ 897 /* This loads the speculative FP/VEC state, if used */
894 if (msr & MSR_FP) { 898 if (msr & MSR_FP) {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 20756dfb9f34..c676ecec0869 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
438 438
439 /* get MSR separately, transfer the LE bit if doing signal return */ 439 /* get MSR separately, transfer the LE bit if doing signal return */
440 err |= __get_user(msr, &sc->gp_regs[PT_MSR]); 440 err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
441 /* Don't allow reserved mode. */
442 if (MSR_TM_RESV(msr))
443 return -EINVAL;
444
441 /* pull in MSR TM from user context */ 445 /* pull in MSR TM from user context */
442 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); 446 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
443 447
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 0c5d8ee657f0..d1e7b0a0feeb 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -312,6 +312,7 @@ extern void css_schedule_reprobe(void);
312extern void reipl_ccw_dev(struct ccw_dev_id *id); 312extern void reipl_ccw_dev(struct ccw_dev_id *id);
313 313
314struct cio_iplinfo { 314struct cio_iplinfo {
315 u8 ssid;
315 u16 devno; 316 u16 devno;
316 int is_qdio; 317 int is_qdio;
317}; 318};
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 3ad48f22de78..bab6739a1154 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -206,9 +206,16 @@ do { \
206} while (0) 206} while (0)
207#endif /* CONFIG_COMPAT */ 207#endif /* CONFIG_COMPAT */
208 208
209extern unsigned long mmap_rnd_mask; 209/*
210 210 * Cache aliasing on the latest machines calls for a mapping granularity
211#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) 211 * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
 212 * of up to 1GB. For 31-bit processes the virtual address space is limited;
213 * use no alignment and limit the randomization to 8MB.
214 */
215#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
216#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
217#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
218#define STACK_RND_MASK MMAP_RND_MASK
212 219
213#define ARCH_DLINFO \ 220#define ARCH_DLINFO \
214do { \ 221do { \
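
A quick check of what the 64-bit masks give, assuming the mask is applied to a page-granular random value as STACK_RND_MASK consumers do: offsets of up to about 1 GB in 512 KB steps:

#include <stdio.h>

#define PAGE_SHIFT    12          /* assumed */
#define MMAP_RND_MASK 0x3ff80UL   /* 64-bit task value from above */

int main(void)
{
	unsigned long max = MMAP_RND_MASK << PAGE_SHIFT;
	unsigned long step = 1UL << (PAGE_SHIFT + 7); /* lowest mask bit */

	printf("max offset:  %lu MB\n", max >> 20);   /* 1023 MB */
	printf("granularity: %lu KB\n", step >> 10);  /* 512 KB */
	return 0;
}
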
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 39ae6a359747..86634e71b69f 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -64,7 +64,8 @@ struct ipl_block_fcp {
64 64
65struct ipl_block_ccw { 65struct ipl_block_ccw {
66 u8 reserved1[84]; 66 u8 reserved1[84];
67 u8 reserved2[2]; 67 u16 reserved2 : 13;
68 u8 ssid : 3;
68 u16 devno; 69 u16 devno;
69 u8 vm_flags; 70 u8 vm_flags;
70 u8 reserved3[3]; 71 u8 reserved3[3];
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7a7abf1a5537..1aac41e83ea1 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
195void dma_free_seg_table(unsigned long); 195void dma_free_seg_table(unsigned long);
196unsigned long *dma_alloc_cpu_table(void); 196unsigned long *dma_alloc_cpu_table(void);
197void dma_cleanup_tables(unsigned long *); 197void dma_cleanup_tables(unsigned long *);
198void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int); 198unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
199void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
200
199#endif 201#endif
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
index 776f307960cc..cc6cfe7889da 100644
--- a/arch/s390/include/asm/trace/diag.h
+++ b/arch/s390/include/asm/trace/diag.h
@@ -19,7 +19,7 @@
19#define TRACE_INCLUDE_PATH asm/trace 19#define TRACE_INCLUDE_PATH asm/trace
20#define TRACE_INCLUDE_FILE diag 20#define TRACE_INCLUDE_FILE diag
21 21
22TRACE_EVENT(diagnose, 22TRACE_EVENT(s390_diagnose,
23 TP_PROTO(unsigned short nr), 23 TP_PROTO(unsigned short nr),
24 TP_ARGS(nr), 24 TP_ARGS(nr),
25 TP_STRUCT__entry( 25 TP_STRUCT__entry(
@@ -32,9 +32,9 @@ TRACE_EVENT(diagnose,
32); 32);
33 33
34#ifdef CONFIG_TRACEPOINTS 34#ifdef CONFIG_TRACEPOINTS
35void trace_diagnose_norecursion(int diag_nr); 35void trace_s390_diagnose_norecursion(int diag_nr);
36#else 36#else
37static inline void trace_diagnose_norecursion(int diag_nr) { } 37static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
38#endif 38#endif
39 39
40#endif /* _TRACE_S390_DIAG_H */ 40#endif /* _TRACE_S390_DIAG_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index a848adba1504..34ec202472c6 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -192,14 +192,14 @@
192#define __NR_set_tid_address 252 192#define __NR_set_tid_address 252
193#define __NR_fadvise64 253 193#define __NR_fadvise64 253
194#define __NR_timer_create 254 194#define __NR_timer_create 254
195#define __NR_timer_settime (__NR_timer_create+1) 195#define __NR_timer_settime 255
196#define __NR_timer_gettime (__NR_timer_create+2) 196#define __NR_timer_gettime 256
197#define __NR_timer_getoverrun (__NR_timer_create+3) 197#define __NR_timer_getoverrun 257
198#define __NR_timer_delete (__NR_timer_create+4) 198#define __NR_timer_delete 258
199#define __NR_clock_settime (__NR_timer_create+5) 199#define __NR_clock_settime 259
200#define __NR_clock_gettime (__NR_timer_create+6) 200#define __NR_clock_gettime 260
201#define __NR_clock_getres (__NR_timer_create+7) 201#define __NR_clock_getres 261
202#define __NR_clock_nanosleep (__NR_timer_create+8) 202#define __NR_clock_nanosleep 262
203/* Number 263 is reserved for vserver */ 203/* Number 263 is reserved for vserver */
204#define __NR_statfs64 265 204#define __NR_statfs64 265
205#define __NR_fstatfs64 266 205#define __NR_fstatfs64 266
@@ -309,7 +309,8 @@
309#define __NR_recvfrom 371 309#define __NR_recvfrom 371
310#define __NR_recvmsg 372 310#define __NR_recvmsg 372
311#define __NR_shutdown 373 311#define __NR_shutdown 373
312#define NR_syscalls 374 312#define __NR_mlock2 374
313#define NR_syscalls 375
313 314
314/* 315/*
315 * There are some system calls that are not present on 64 bit, some 316 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 09f194052df3..fac4eeddef91 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); 176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); 177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); 178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
179COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f98766ede4e1..48b37b8357e6 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -121,14 +121,14 @@ device_initcall(show_diag_stat_init);
121void diag_stat_inc(enum diag_stat_enum nr) 121void diag_stat_inc(enum diag_stat_enum nr)
122{ 122{
123 this_cpu_inc(diag_stat.counter[nr]); 123 this_cpu_inc(diag_stat.counter[nr]);
124 trace_diagnose(diag_map[nr].code); 124 trace_s390_diagnose(diag_map[nr].code);
125} 125}
126EXPORT_SYMBOL(diag_stat_inc); 126EXPORT_SYMBOL(diag_stat_inc);
127 127
128void diag_stat_inc_norecursion(enum diag_stat_enum nr) 128void diag_stat_inc_norecursion(enum diag_stat_enum nr)
129{ 129{
130 this_cpu_inc(diag_stat.counter[nr]); 130 this_cpu_inc(diag_stat.counter[nr]);
131 trace_diagnose_norecursion(diag_map[nr].code); 131 trace_s390_diagnose_norecursion(diag_map[nr].code);
132} 132}
133EXPORT_SYMBOL(diag_stat_inc_norecursion); 133EXPORT_SYMBOL(diag_stat_inc_norecursion);
134 134
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1255c6c5353e..301ee9c70688 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -26,6 +26,7 @@
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/thread_info.h> 27#include <asm/thread_info.h>
28#include <asm/page.h> 28#include <asm/page.h>
29#include <asm/ptrace.h>
29 30
30#define ARCH_OFFSET 4 31#define ARCH_OFFSET 4
31 32
@@ -59,19 +60,6 @@ __HEAD
59 .long 0x020006e0,0x20000050 60 .long 0x020006e0,0x20000050
60 61
61 .org 0x200 62 .org 0x200
62#
63# subroutine to set architecture mode
64#
65.Lsetmode:
66 mvi __LC_AR_MODE_ID,1 # set esame flag
67 slr %r0,%r0 # set cpuid to zero
68 lhi %r1,2 # mode 2 = esame (dump)
69 sigp %r1,%r0,0x12 # switch to esame mode
70 bras %r13,0f
71 .fill 16,4,0x0
720: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
73 sam31 # switch to 31 bit addressing mode
74 br %r14
75 63
76# 64#
77# subroutine to wait for end I/O 65# subroutine to wait for end I/O
@@ -159,7 +147,14 @@ __HEAD
159 .long 0x02200050,0x00000000 147 .long 0x02200050,0x00000000
160 148
161iplstart: 149iplstart:
162 bas %r14,.Lsetmode # Immediately switch to 64 bit mode 150 mvi __LC_AR_MODE_ID,1 # set esame flag
151 slr %r0,%r0 # set cpuid to zero
152 lhi %r1,2 # mode 2 = esame (dump)
153 sigp %r1,%r0,0x12 # switch to esame mode
154 bras %r13,0f
155 .fill 16,4,0x0
1560: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
157 sam31 # switch to 31 bit addressing mode
163 lh %r1,0xb8 # test if subchannel number 158 lh %r1,0xb8 # test if subchannel number
164 bct %r1,.Lnoload # is valid 159 bct %r1,.Lnoload # is valid
165 l %r1,0xb8 # load ipl subchannel number 160 l %r1,0xb8 # load ipl subchannel number
@@ -269,71 +264,6 @@ iplstart:
269.Lcpuid:.fill 8,1,0 264.Lcpuid:.fill 8,1,0
270 265
271# 266#
272# SALIPL loader support. Based on a patch by Rob van der Heij.
273# This entry point is called directly from the SALIPL loader and
274# doesn't need a builtin ipl record.
275#
276 .org 0x800
277ENTRY(start)
278 stm %r0,%r15,0x07b0 # store registers
279 bas %r14,.Lsetmode # Immediately switch to 64 bit mode
280 basr %r12,%r0
281.base:
282 l %r11,.parm
283 l %r8,.cmd # pointer to command buffer
284
285 ltr %r9,%r9 # do we have SALIPL parameters?
286 bp .sk8x8
287
288 mvc 0(64,%r8),0x00b0 # copy saved registers
289 xc 64(240-64,%r8),0(%r8) # remainder of buffer
290 tr 0(64,%r8),.lowcase
291 b .gotr
292.sk8x8:
293 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
294.gotr:
295 slr %r0,%r0
296 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
297 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
298 j startup # continue with startup
299.cmd: .long COMMAND_LINE # address of command line buffer
300.parm: .long PARMAREA
301.lowcase:
302 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
303 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
304 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
305 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
306 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
307 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
308 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
309 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
310 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
311 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
312 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
313 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
314 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
315 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
316 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
317 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
318
319 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
320 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
321 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
322 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
323 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
324 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
325 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
326 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
327 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
328 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
329 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
330 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
331 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
332 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
333 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
334 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
335
336#
337# startup-code at 0x10000, running in absolute addressing mode 267# startup-code at 0x10000, running in absolute addressing mode
338# this is called either by the ipl loader or directly by PSW restart 268# this is called either by the ipl loader or directly by PSW restart
339# or linload or SALIPL 269# or linload or SALIPL
@@ -364,7 +294,7 @@ ENTRY(startup_kdump)
364 bras %r13,0f 294 bras %r13,0f
365 .fill 16,4,0x0 295 .fill 16,4,0x0
3660: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 2960: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
367 sam31 # switch to 31 bit addressing mode 297 sam64 # switch to 64 bit addressing mode
368 basr %r13,0 # get base 298 basr %r13,0 # get base
369.LPG0: 299.LPG0:
370 xc 0x200(256),0x200 # partially clear lowcore 300 xc 0x200(256),0x200 # partially clear lowcore
@@ -395,7 +325,7 @@ ENTRY(startup_kdump)
395 jnz 1b 325 jnz 1b
396 j 4f 326 j 4f
3972: l %r15,.Lstack-.LPG0(%r13) 3272: l %r15,.Lstack-.LPG0(%r13)
398 ahi %r15,-96 328 ahi %r15,-STACK_FRAME_OVERHEAD
399 la %r2,.Lals_string-.LPG0(%r13) 329 la %r2,.Lals_string-.LPG0(%r13)
400 l %r3,.Lsclp_print-.LPG0(%r13) 330 l %r3,.Lsclp_print-.LPG0(%r13)
401 basr %r14,%r3 331 basr %r14,%r3
@@ -429,8 +359,7 @@ ENTRY(startup_kdump)
429 .long 1, 0xc0000000 359 .long 1, 0xc0000000
430#endif 360#endif
4314: 3614:
432 /* Continue with 64bit startup code in head64.S */ 362 /* Continue with startup code in head64.S */
433 sam64 # switch to 64 bit mode
434 jg startup_continue 363 jg startup_continue
435 364
436 .align 8 365 .align 8
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f6d8acd7e136..b1f0a90f933b 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -121,6 +121,7 @@ static char *dump_type_str(enum dump_type type)
121 * Must be in data section since the bss section 121 * Must be in data section since the bss section
122 * is not cleared when these are accessed. 122 * is not cleared when these are accessed.
123 */ 123 */
124static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
124static u16 ipl_devno __attribute__((__section__(".data"))) = 0; 125static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
125u32 ipl_flags __attribute__((__section__(".data"))) = 0; 126u32 ipl_flags __attribute__((__section__(".data"))) = 0;
126 127
@@ -197,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
197 return snprintf(page, PAGE_SIZE, _format, ##args); \ 198 return snprintf(page, PAGE_SIZE, _format, ##args); \
198} 199}
199 200
201#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
202static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
203 struct kobj_attribute *attr, \
204 const char *buf, size_t len) \
205{ \
206 unsigned long long ssid, devno; \
207 \
208 if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
209 return -EINVAL; \
210 \
211 if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
212 return -EINVAL; \
213 \
214 _ipl_blk.ssid = ssid; \
215 _ipl_blk.devno = devno; \
216 return len; \
217}
218
219#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
220IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
221 _ipl_blk.ssid, _ipl_blk.devno); \
222IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
223static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
224 __ATTR(_name, (S_IRUGO | S_IWUSR), \
225 sys_##_prefix##_##_name##_show, \
226 sys_##_prefix##_##_name##_store) \
227
200#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ 228#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
201IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ 229IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
202static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ 230static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
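
The new store handler accepts a full ccw bus id rather than a bare device number; the parse-and-validate step is easy to model in userspace (the __MAX_SSID/__MAX_SUBCHANNEL limits are assumed from the s390 cio code):

#include <stdio.h>

#define MAX_SSID       3
#define MAX_SUBCHANNEL 0xffff

/* Mirrors the sscanf in IPL_ATTR_CCW_STORE_FN above. */
static int parse_busid(const char *buf, unsigned long long *ssid,
		       unsigned long long *devno)
{
	if (sscanf(buf, "0.%llx.%llx", ssid, devno) != 2)
		return -1;
	if (*ssid > MAX_SSID || *devno > MAX_SUBCHANNEL)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long long ssid, devno;

	if (parse_busid("0.1.5000\n", &ssid, &devno) == 0)
		printf("ssid=%llx devno=%04llx\n", ssid, devno);
	return 0;
}
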
@@ -395,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
395 423
396 switch (ipl_info.type) { 424 switch (ipl_info.type) {
397 case IPL_TYPE_CCW: 425 case IPL_TYPE_CCW:
398 return sprintf(page, "0.0.%04x\n", ipl_devno); 426 return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
399 case IPL_TYPE_FCP: 427 case IPL_TYPE_FCP:
400 case IPL_TYPE_FCP_DUMP: 428 case IPL_TYPE_FCP_DUMP:
401 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); 429 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
@@ -687,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
687 struct bin_attribute *attr, 715 struct bin_attribute *attr,
688 char *buf, loff_t off, size_t count) 716 char *buf, loff_t off, size_t count)
689{ 717{
718 size_t scpdata_len = count;
690 size_t padding; 719 size_t padding;
691 size_t scpdata_len;
692
693 if (off < 0)
694 return -EINVAL;
695 720
696 if (off >= DIAG308_SCPDATA_SIZE)
697 return -ENOSPC;
698 721
699 if (count > DIAG308_SCPDATA_SIZE - off) 722 if (off)
700 count = DIAG308_SCPDATA_SIZE - off; 723 return -EINVAL;
701
702 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
703 scpdata_len = off + count;
704 724
725 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
705 if (scpdata_len % 8) { 726 if (scpdata_len % 8) {
706 padding = 8 - (scpdata_len % 8); 727 padding = 8 - (scpdata_len % 8);
707 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, 728 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
@@ -717,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
717} 738}
718static struct bin_attribute sys_reipl_fcp_scp_data_attr = 739static struct bin_attribute sys_reipl_fcp_scp_data_attr =
719 __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, 740 __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
720 reipl_fcp_scpdata_write, PAGE_SIZE); 741 reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
721 742
722static struct bin_attribute *reipl_fcp_bin_attrs[] = { 743static struct bin_attribute *reipl_fcp_bin_attrs[] = {
723 &sys_reipl_fcp_scp_data_attr, 744 &sys_reipl_fcp_scp_data_attr,
@@ -814,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = {
814}; 835};
815 836
816/* CCW reipl device attributes */ 837/* CCW reipl device attributes */
817 838DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
818DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
819 reipl_block_ccw->ipl_info.ccw.devno);
820 839
821/* NSS wrapper */ 840/* NSS wrapper */
822static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, 841static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -1056,8 +1075,8 @@ static void __reipl_run(void *unused)
1056 1075
1057 switch (reipl_method) { 1076 switch (reipl_method) {
1058 case REIPL_METHOD_CCW_CIO: 1077 case REIPL_METHOD_CCW_CIO:
1078 devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
1059 devid.devno = reipl_block_ccw->ipl_info.ccw.devno; 1079 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
1060 devid.ssid = 0;
1061 reipl_ccw_dev(&devid); 1080 reipl_ccw_dev(&devid);
1062 break; 1081 break;
1063 case REIPL_METHOD_CCW_VM: 1082 case REIPL_METHOD_CCW_VM:
@@ -1192,6 +1211,7 @@ static int __init reipl_ccw_init(void)
1192 1211
1193 reipl_block_ccw_init(reipl_block_ccw); 1212 reipl_block_ccw_init(reipl_block_ccw);
1194 if (ipl_info.type == IPL_TYPE_CCW) { 1213 if (ipl_info.type == IPL_TYPE_CCW) {
1214 reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
1195 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 1215 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
1196 reipl_block_ccw_fill_parms(reipl_block_ccw); 1216 reipl_block_ccw_fill_parms(reipl_block_ccw);
1197 } 1217 }
@@ -1336,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = {
1336}; 1356};
1337 1357
1338/* CCW dump device attributes */ 1358/* CCW dump device attributes */
1339 1359DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
1340DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
1341 dump_block_ccw->ipl_info.ccw.devno);
1342 1360
1343static struct attribute *dump_ccw_attrs[] = { 1361static struct attribute *dump_ccw_attrs[] = {
1344 &sys_dump_ccw_device_attr.attr, 1362 &sys_dump_ccw_device_attr.attr,
@@ -1418,8 +1436,8 @@ static void __dump_run(void *unused)
1418 1436
1419 switch (dump_method) { 1437 switch (dump_method) {
1420 case DUMP_METHOD_CCW_CIO: 1438 case DUMP_METHOD_CCW_CIO:
1439 devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
1421 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 1440 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
1422 devid.ssid = 0;
1423 reipl_ccw_dev(&devid); 1441 reipl_ccw_dev(&devid);
1424 break; 1442 break;
1425 case DUMP_METHOD_CCW_VM: 1443 case DUMP_METHOD_CCW_VM:
@@ -1939,14 +1957,14 @@ void __init setup_ipl(void)
1939 ipl_info.type = get_ipl_type(); 1957 ipl_info.type = get_ipl_type();
1940 switch (ipl_info.type) { 1958 switch (ipl_info.type) {
1941 case IPL_TYPE_CCW: 1959 case IPL_TYPE_CCW:
1960 ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
1942 ipl_info.data.ccw.dev_id.devno = ipl_devno; 1961 ipl_info.data.ccw.dev_id.devno = ipl_devno;
1943 ipl_info.data.ccw.dev_id.ssid = 0;
1944 break; 1962 break;
1945 case IPL_TYPE_FCP: 1963 case IPL_TYPE_FCP:
1946 case IPL_TYPE_FCP_DUMP: 1964 case IPL_TYPE_FCP_DUMP:
1965 ipl_info.data.fcp.dev_id.ssid = 0;
1947 ipl_info.data.fcp.dev_id.devno = 1966 ipl_info.data.fcp.dev_id.devno =
1948 IPL_PARMBLOCK_START->ipl_info.fcp.devno; 1967 IPL_PARMBLOCK_START->ipl_info.fcp.devno;
1949 ipl_info.data.fcp.dev_id.ssid = 0;
1950 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; 1968 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
1951 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; 1969 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
1952 break; 1970 break;
@@ -1978,6 +1996,7 @@ void __init ipl_save_parameters(void)
1978 if (cio_get_iplinfo(&iplinfo)) 1996 if (cio_get_iplinfo(&iplinfo))
1979 return; 1997 return;
1980 1998
1999 ipl_ssid = iplinfo.ssid;
1981 ipl_devno = iplinfo.devno; 2000 ipl_devno = iplinfo.devno;
1982 ipl_flags |= IPL_DEVNO_VALID; 2001 ipl_flags |= IPL_DEVNO_VALID;
1983 if (!iplinfo.is_qdio) 2002 if (!iplinfo.is_qdio)
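[editor's note] The net effect of the ipl.c changes above is that CCW device attributes now carry the subchannel set id: they print "0.<ssid>.<devno>" and the new store macro parses the same form back. A standalone round-trip sketch, with illustrative limits standing in for __MAX_SSID and __MAX_SUBCHANNEL:

	#include <stdio.h>

	#define MAX_SSID	3	/* stand-in for __MAX_SSID */
	#define MAX_SUBCHANNEL	0xffff	/* stand-in for __MAX_SUBCHANNEL */

	static int parse_ccw_busid(const char *buf, unsigned long long *ssid,
				   unsigned long long *devno)
	{
		if (sscanf(buf, "0.%llx.%llx", ssid, devno) != 2)
			return -1;
		if (*ssid > MAX_SSID || *devno > MAX_SUBCHANNEL)
			return -1;
		return 0;
	}

	int main(void)
	{
		unsigned long long ssid, devno;

		if (!parse_ccw_busid("0.1.3c07\n", &ssid, &devno))
			printf("0.%llx.%04llx\n", ssid, devno); /* 0.1.3c07 */
		return 0;
	}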
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 688a3aad9c79..114ee8b96f17 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
243 243
244static inline unsigned long brk_rnd(void) 244static inline unsigned long brk_rnd(void)
245{ 245{
246 /* 8MB for 32bit, 1GB for 64bit */ 246 return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
247 if (is_32bit_task())
248 return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
249 else
250 return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
251} 247}
252 248
253unsigned long arch_randomize_brk(struct mm_struct *mm) 249unsigned long arch_randomize_brk(struct mm_struct *mm)
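[editor's note] brk_rnd() above now applies one compile-time mask instead of branching on the task width. A sketch of the effect, with 0x3ffffUL (the old 64-bit constant) assumed as the value behind BRK_RND_MASK and rand() standing in for get_random_int():

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT	12
	#define BRK_RND_MASK	0x3ffffUL	/* assumed; the arch header defines it */

	/* Random page-aligned offset; rand() stands in for get_random_int(). */
	static unsigned long brk_rnd(void)
	{
		return ((unsigned long)rand() & BRK_RND_MASK) << PAGE_SHIFT;
	}

	int main(void)
	{
		/* Maximum span: (0x3ffff + 1) pages = 2^18 * 4KB = 1GB. */
		printf("%lu\n", brk_rnd());
		return 0;
	}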
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index fa0bdff1d413..9fe7781a45cd 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -21,7 +21,7 @@ static void _sclp_wait_int(void)
21 __ctl_load(cr0_new, 0, 0); 21 __ctl_load(cr0_new, 0, 0);
22 22
23 psw_ext_save = S390_lowcore.external_new_psw; 23 psw_ext_save = S390_lowcore.external_new_psw;
24 psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); 24 psw_mask = __extract_psw();
25 S390_lowcore.external_new_psw.mask = psw_mask; 25 S390_lowcore.external_new_psw.mask = psw_mask;
26 psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; 26 psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
27 S390_lowcore.ext_int_code = 0; 27 S390_lowcore.ext_int_code = 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ce0cbd6ba7ca..c837bcacf218 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -764,9 +764,6 @@ static int __init setup_hwcaps(void)
764 get_cpu_id(&cpu_id); 764 get_cpu_id(&cpu_id);
765 add_device_randomness(&cpu_id, sizeof(cpu_id)); 765 add_device_randomness(&cpu_id, sizeof(cpu_id));
766 switch (cpu_id.machine) { 766 switch (cpu_id.machine) {
767 case 0x9672:
768 strcpy(elf_platform, "g5");
769 break;
770 case 0x2064: 767 case 0x2064:
771 case 0x2066: 768 case 0x2066:
772 default: /* Use "z900" as default for 64 bit kernels. */ 769 default: /* Use "z900" as default for 64 bit kernels. */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 8c56929c8d82..5378c3ea1b98 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
382SYSCALL(sys_recvfrom,compat_sys_recvfrom) 382SYSCALL(sys_recvfrom,compat_sys_recvfrom)
383SYSCALL(sys_recvmsg,compat_sys_recvmsg) 383SYSCALL(sys_recvmsg,compat_sys_recvmsg)
384SYSCALL(sys_shutdown,sys_shutdown) 384SYSCALL(sys_shutdown,sys_shutdown)
385SYSCALL(sys_mlock2,compat_sys_mlock2)
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 73239bb576c4..21a5df99552b 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -9,11 +9,11 @@
9#define CREATE_TRACE_POINTS 9#define CREATE_TRACE_POINTS
10#include <asm/trace/diag.h> 10#include <asm/trace/diag.h>
11 11
12EXPORT_TRACEPOINT_SYMBOL(diagnose); 12EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
13 13
14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); 14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
15 15
16void trace_diagnose_norecursion(int diag_nr) 16void trace_s390_diagnose_norecursion(int diag_nr)
17{ 17{
18 unsigned long flags; 18 unsigned long flags;
19 unsigned int *depth; 19 unsigned int *depth;
@@ -22,7 +22,7 @@ void trace_diagnose_norecursion(int diag_nr)
22 depth = this_cpu_ptr(&diagnose_trace_depth); 22 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) { 23 if (*depth == 0) {
24 (*depth)++; 24 (*depth)++;
25 trace_diagnose(diag_nr); 25 trace_s390_diagnose(diag_nr);
26 (*depth)--; 26 (*depth)--;
27 } 27 }
28 local_irq_restore(flags); 28 local_irq_restore(flags);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 373e32346d68..6a75352f453c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1030 src_id, 0); 1030 src_id, 0);
1031 1031
1032 /* sending vcpu invalid */ 1032 /* sending vcpu invalid */
1033 if (src_id >= KVM_MAX_VCPUS || 1033 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1034 kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
1035 return -EINVAL; 1034 return -EINVAL;
1036 1035
1037 if (sclp.has_sigpif) 1036 if (sclp.has_sigpif)
@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1110 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1109 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1111 irq->u.emerg.code, 0); 1110 irq->u.emerg.code, 0);
1112 1111
1112 /* sending vcpu invalid */
1113 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1114 return -EINVAL;
1115
1113 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1116 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1114 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1117 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1115 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1118 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
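[editor's note] Both hunks above replace an index-based kvm_get_vcpu() plus a KVM_MAX_VCPUS range check with kvm_get_vcpu_by_id(), which looks the vcpu up by its architectural id; the id need not equal the array index, and an out-of-range id simply finds nothing. A reduced sketch of a by-id lookup under those assumptions (struct vcpu here is illustrative, not the kernel's):

	#include <stddef.h>

	struct vcpu {
		int vcpu_id;	/* architectural id, not the array index */
	};

	/* Return the vcpu whose id matches, or NULL if no such vcpu exists. */
	static struct vcpu *get_vcpu_by_id(struct vcpu **vcpus, int nr, int id)
	{
		int i;

		for (i = 0; i < nr; i++)
			if (vcpus[i] && vcpus[i]->vcpu_id == id)
				return vcpus[i];
		return NULL;
	}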
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8fe2f1c722dc..846589281b04 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
342 r = 0; 342 r = 0;
343 break; 343 break;
344 case KVM_CAP_S390_VECTOR_REGISTERS: 344 case KVM_CAP_S390_VECTOR_REGISTERS:
345 if (MACHINE_HAS_VX) { 345 mutex_lock(&kvm->lock);
346 if (atomic_read(&kvm->online_vcpus)) {
347 r = -EBUSY;
348 } else if (MACHINE_HAS_VX) {
346 set_kvm_facility(kvm->arch.model.fac->mask, 129); 349 set_kvm_facility(kvm->arch.model.fac->mask, 129);
347 set_kvm_facility(kvm->arch.model.fac->list, 129); 350 set_kvm_facility(kvm->arch.model.fac->list, 129);
348 r = 0; 351 r = 0;
349 } else 352 } else
350 r = -EINVAL; 353 r = -EINVAL;
354 mutex_unlock(&kvm->lock);
351 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", 355 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
352 r ? "(not available)" : "(success)"); 356 r ? "(not available)" : "(success)");
353 break; 357 break;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 77191b85ea7a..d76b51cb4b62 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
660 660
661 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); 661 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
662 662
663 if (!MACHINE_HAS_PFMF) 663 if (!test_kvm_facility(vcpu->kvm, 8))
664 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 664 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
665 665
666 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 666 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index da690b69f9fe..77c22d685c7a 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
291 u16 cpu_addr, u32 parameter, u64 *status_reg) 291 u16 cpu_addr, u32 parameter, u64 *status_reg)
292{ 292{
293 int rc; 293 int rc;
294 struct kvm_vcpu *dst_vcpu; 294 struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
295 295
296 if (cpu_addr >= KVM_MAX_VCPUS)
297 return SIGP_CC_NOT_OPERATIONAL;
298
299 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
300 if (!dst_vcpu) 296 if (!dst_vcpu)
301 return SIGP_CC_NOT_OPERATIONAL; 297 return SIGP_CC_NOT_OPERATIONAL;
302 298
@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
478 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); 474 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
479 475
480 if (order_code == SIGP_EXTERNAL_CALL) { 476 if (order_code == SIGP_EXTERNAL_CALL) {
481 dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 477 dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
482 BUG_ON(dest_vcpu == NULL); 478 BUG_ON(dest_vcpu == NULL);
483 479
484 kvm_s390_vcpu_wakeup(dest_vcpu); 480 kvm_s390_vcpu_wakeup(dest_vcpu);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c3c07d3505ba..c722400c7697 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
48 48
49static void __init setup_zero_pages(void) 49static void __init setup_zero_pages(void)
50{ 50{
51 struct cpuid cpu_id;
52 unsigned int order; 51 unsigned int order;
53 struct page *page; 52 struct page *page;
54 int i; 53 int i;
55 54
56 get_cpu_id(&cpu_id); 55 /* Latest machines require a mapping granularity of 512KB */
57 switch (cpu_id.machine) { 56 order = 7;
58 case 0x9672: /* g5 */ 57
59 case 0x2064: /* z900 */
60 case 0x2066: /* z900 */
61 case 0x2084: /* z990 */
62 case 0x2086: /* z990 */
63 case 0x2094: /* z9-109 */
64 case 0x2096: /* z9-109 */
65 order = 0;
66 break;
67 case 0x2097: /* z10 */
68 case 0x2098: /* z10 */
69 case 0x2817: /* z196 */
70 case 0x2818: /* z196 */
71 order = 2;
72 break;
73 case 0x2827: /* zEC12 */
74 case 0x2828: /* zEC12 */
75 order = 5;
76 break;
77 case 0x2964: /* z13 */
78 default:
79 order = 7;
80 break;
81 }
82 /* Limit number of empty zero pages for small memory sizes */ 58 /* Limit number of empty zero pages for small memory sizes */
83 while (order > 2 && (totalram_pages >> 10) < (1UL << order)) 59 while (order > 2 && (totalram_pages >> 10) < (1UL << order))
84 order--; 60 order--;
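[editor's note] setup_zero_pages() now starts from order 7 — 128 pages, i.e. 512KB with 4KB pages — and the pre-existing loop shrinks that on small systems. A sketch of the sizing rule with total RAM given in pages (zero_page_order is an illustrative name):

	/*
	 * Start at 512KB (order 7) and shrink while the zero-page block
	 * would exceed roughly 1/1024 of total memory, but never go
	 * below order 2.
	 */
	static unsigned int zero_page_order(unsigned long totalram_pages)
	{
		unsigned int order = 7;	/* 2^7 pages * 4KB = 512KB */

		while (order > 2 && (totalram_pages >> 10) < (1UL << order))
			order--;
		return order;	/* e.g. 4 for a 64MB guest, 7 from 512MB up */
	}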
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 6e552af08c76..ea01477b4aa6 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -31,9 +31,6 @@
31#include <linux/security.h> 31#include <linux/security.h>
32#include <asm/pgalloc.h> 32#include <asm/pgalloc.h>
33 33
34unsigned long mmap_rnd_mask;
35static unsigned long mmap_align_mask;
36
37static unsigned long stack_maxrandom_size(void) 34static unsigned long stack_maxrandom_size(void)
38{ 35{
39 if (!(current->flags & PF_RANDOMIZE)) 36 if (!(current->flags & PF_RANDOMIZE))
@@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
62 59
63unsigned long arch_mmap_rnd(void) 60unsigned long arch_mmap_rnd(void)
64{ 61{
65 if (is_32bit_task()) 62 return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
66 return (get_random_int() & 0x7ff) << PAGE_SHIFT;
67 else
68 return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
69} 63}
70 64
71static unsigned long mmap_base_legacy(unsigned long rnd) 65static unsigned long mmap_base_legacy(unsigned long rnd)
@@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92 struct mm_struct *mm = current->mm; 86 struct mm_struct *mm = current->mm;
93 struct vm_area_struct *vma; 87 struct vm_area_struct *vma;
94 struct vm_unmapped_area_info info; 88 struct vm_unmapped_area_info info;
95 int do_color_align;
96 89
97 if (len > TASK_SIZE - mmap_min_addr) 90 if (len > TASK_SIZE - mmap_min_addr)
98 return -ENOMEM; 91 return -ENOMEM;
@@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
108 return addr; 101 return addr;
109 } 102 }
110 103
111 do_color_align = 0;
112 if (filp || (flags & MAP_SHARED))
113 do_color_align = !is_32bit_task();
114
115 info.flags = 0; 104 info.flags = 0;
116 info.length = len; 105 info.length = len;
117 info.low_limit = mm->mmap_base; 106 info.low_limit = mm->mmap_base;
118 info.high_limit = TASK_SIZE; 107 info.high_limit = TASK_SIZE;
119 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; 108 if (filp || (flags & MAP_SHARED))
109 info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
110 else
111 info.align_mask = 0;
120 info.align_offset = pgoff << PAGE_SHIFT; 112 info.align_offset = pgoff << PAGE_SHIFT;
121 return vm_unmapped_area(&info); 113 return vm_unmapped_area(&info);
122} 114}
@@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
130 struct mm_struct *mm = current->mm; 122 struct mm_struct *mm = current->mm;
131 unsigned long addr = addr0; 123 unsigned long addr = addr0;
132 struct vm_unmapped_area_info info; 124 struct vm_unmapped_area_info info;
133 int do_color_align;
134 125
135 /* requested length too big for entire address space */ 126 /* requested length too big for entire address space */
136 if (len > TASK_SIZE - mmap_min_addr) 127 if (len > TASK_SIZE - mmap_min_addr)
@@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
148 return addr; 139 return addr;
149 } 140 }
150 141
151 do_color_align = 0;
152 if (filp || (flags & MAP_SHARED))
153 do_color_align = !is_32bit_task();
154
155 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 142 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
156 info.length = len; 143 info.length = len;
157 info.low_limit = max(PAGE_SIZE, mmap_min_addr); 144 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
158 info.high_limit = mm->mmap_base; 145 info.high_limit = mm->mmap_base;
159 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; 146 if (filp || (flags & MAP_SHARED))
147 info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
148 else
149 info.align_mask = 0;
160 info.align_offset = pgoff << PAGE_SHIFT; 150 info.align_offset = pgoff << PAGE_SHIFT;
161 addr = vm_unmapped_area(&info); 151 addr = vm_unmapped_area(&info);
162 152
@@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
254 mm->get_unmapped_area = s390_get_unmapped_area_topdown; 244 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
255 } 245 }
256} 246}
257
258static int __init setup_mmap_rnd(void)
259{
260 struct cpuid cpu_id;
261
262 get_cpu_id(&cpu_id);
263 switch (cpu_id.machine) {
264 case 0x9672:
265 case 0x2064:
266 case 0x2066:
267 case 0x2084:
268 case 0x2086:
269 case 0x2094:
270 case 0x2096:
271 case 0x2097:
272 case 0x2098:
273 case 0x2817:
274 case 0x2818:
275 case 0x2827:
276 case 0x2828:
277 mmap_rnd_mask = 0x7ffUL;
278 mmap_align_mask = 0UL;
279 break;
280 case 0x2964: /* z13 */
281 default:
282 mmap_rnd_mask = 0x3ff80UL;
283 mmap_align_mask = 0x7fUL;
284 break;
285 }
286 return 0;
287}
288early_initcall(setup_mmap_rnd);
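[editor's note] With the cpuid-probing initcall gone, the alignment color for file-backed and shared mappings comes from a constant MMAP_ALIGN_MASK. A rough sketch of how such a mask and vm_unmapped_area's align_offset interact, assuming the 0x7f-page (512KB) mask the removed code used for z13 (color_align is illustrative, not a kernel helper):

	#define PAGE_SHIFT	12
	#define MMAP_ALIGN_MASK	0x7fUL	/* assumed: 128-page (512KB) color */

	/*
	 * Bump addr to the next boundary, then add the file offset's color
	 * so the low bits of the mapping match the low bits of the offset.
	 */
	static unsigned long color_align(unsigned long addr, unsigned long pgoff)
	{
		unsigned long mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
		unsigned long off = (pgoff << PAGE_SHIFT) & mask;

		return ((addr + mask) & ~mask) + off;
	}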
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37d10f74425a..d348f2c09a1e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -33,7 +33,7 @@ unsigned long *dma_alloc_cpu_table(void)
33 return NULL; 33 return NULL;
34 34
35 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) 35 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
36 *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED; 36 *entry = ZPCI_TABLE_INVALID;
37 return table; 37 return table;
38} 38}
39 39
@@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void)
51 return NULL; 51 return NULL;
52 52
53 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) 53 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
54 *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED; 54 *entry = ZPCI_PTE_INVALID;
55 return table; 55 return table;
56} 56}
57 57
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
95 return pto; 95 return pto;
96} 96}
97 97
98static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) 98unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
99{ 99{
100 unsigned long *sto, *pto; 100 unsigned long *sto, *pto;
101 unsigned int rtx, sx, px; 101 unsigned int rtx, sx, px;
@@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
114 return &pto[px]; 114 return &pto[px];
115} 115}
116 116
117void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr, 117void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
118 dma_addr_t dma_addr, int flags)
119{ 118{
120 unsigned long *entry;
121
122 entry = dma_walk_cpu_trans(dma_table, dma_addr);
123 if (!entry) {
124 WARN_ON_ONCE(1);
125 return;
126 }
127
128 if (flags & ZPCI_PTE_INVALID) { 119 if (flags & ZPCI_PTE_INVALID) {
129 invalidate_pt_entry(entry); 120 invalidate_pt_entry(entry);
130 return;
131 } else { 121 } else {
132 set_pt_pfaa(entry, page_addr); 122 set_pt_pfaa(entry, page_addr);
133 validate_pt_entry(entry); 123 validate_pt_entry(entry);
@@ -146,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
146 u8 *page_addr = (u8 *) (pa & PAGE_MASK); 136 u8 *page_addr = (u8 *) (pa & PAGE_MASK);
147 dma_addr_t start_dma_addr = dma_addr; 137 dma_addr_t start_dma_addr = dma_addr;
148 unsigned long irq_flags; 138 unsigned long irq_flags;
139 unsigned long *entry;
149 int i, rc = 0; 140 int i, rc = 0;
150 141
151 if (!nr_pages) 142 if (!nr_pages)
152 return -EINVAL; 143 return -EINVAL;
153 144
154 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); 145 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
155 if (!zdev->dma_table) 146 if (!zdev->dma_table) {
147 rc = -EINVAL;
156 goto no_refresh; 148 goto no_refresh;
149 }
157 150
158 for (i = 0; i < nr_pages; i++) { 151 for (i = 0; i < nr_pages; i++) {
159 dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr, 152 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
160 flags); 153 if (!entry) {
154 rc = -ENOMEM;
155 goto undo_cpu_trans;
156 }
157 dma_update_cpu_trans(entry, page_addr, flags);
161 page_addr += PAGE_SIZE; 158 page_addr += PAGE_SIZE;
162 dma_addr += PAGE_SIZE; 159 dma_addr += PAGE_SIZE;
163 } 160 }
@@ -176,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
176 173
177 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, 174 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
178 nr_pages * PAGE_SIZE); 175 nr_pages * PAGE_SIZE);
176undo_cpu_trans:
177 if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
178 flags = ZPCI_PTE_INVALID;
179 while (i-- > 0) {
180 page_addr -= PAGE_SIZE;
181 dma_addr -= PAGE_SIZE;
182 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
183 if (!entry)
184 break;
185 dma_update_cpu_trans(entry, page_addr, flags);
186 }
187 }
179 188
180no_refresh: 189no_refresh:
181 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); 190 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -260,6 +269,16 @@ out:
260 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); 269 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
261} 270}
262 271
272static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
273{
274 struct {
275 unsigned long rc;
276 unsigned long addr;
277 } __packed data = {rc, addr};
278
279 zpci_err_hex(&data, sizeof(data));
280}
281
263static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, 282static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
264 unsigned long offset, size_t size, 283 unsigned long offset, size_t size,
265 enum dma_data_direction direction, 284 enum dma_data_direction direction,
@@ -270,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
270 unsigned long pa = page_to_phys(page) + offset; 289 unsigned long pa = page_to_phys(page) + offset;
271 int flags = ZPCI_PTE_VALID; 290 int flags = ZPCI_PTE_VALID;
272 dma_addr_t dma_addr; 291 dma_addr_t dma_addr;
292 int ret;
273 293
274 /* This rounds up number of pages based on size and offset */ 294 /* This rounds up number of pages based on size and offset */
275 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); 295 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
276 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); 296 iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
277 if (iommu_page_index == -1) 297 if (iommu_page_index == -1) {
298 ret = -ENOSPC;
278 goto out_err; 299 goto out_err;
300 }
279 301
280 /* Use rounded up size */ 302 /* Use rounded up size */
281 size = nr_pages * PAGE_SIZE; 303 size = nr_pages * PAGE_SIZE;
282 304
283 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; 305 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
284 if (dma_addr + size > zdev->end_dma) 306 if (dma_addr + size > zdev->end_dma) {
307 ret = -ERANGE;
285 goto out_free; 308 goto out_free;
309 }
286 310
287 if (direction == DMA_NONE || direction == DMA_TO_DEVICE) 311 if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
288 flags |= ZPCI_TABLE_PROTECTED; 312 flags |= ZPCI_TABLE_PROTECTED;
289 313
290 if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { 314 ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
291 atomic64_add(nr_pages, &zdev->mapped_pages); 315 if (ret)
292 return dma_addr + (offset & ~PAGE_MASK); 316 goto out_free;
293 } 317
318 atomic64_add(nr_pages, &zdev->mapped_pages);
319 return dma_addr + (offset & ~PAGE_MASK);
294 320
295out_free: 321out_free:
296 dma_free_iommu(zdev, iommu_page_index, nr_pages); 322 dma_free_iommu(zdev, iommu_page_index, nr_pages);
297out_err: 323out_err:
298 zpci_err("map error:\n"); 324 zpci_err("map error:\n");
299 zpci_err_hex(&pa, sizeof(pa)); 325 zpci_err_dma(ret, pa);
300 return DMA_ERROR_CODE; 326 return DMA_ERROR_CODE;
301} 327}
302 328
@@ -306,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
306{ 332{
307 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 333 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
308 unsigned long iommu_page_index; 334 unsigned long iommu_page_index;
309 int npages; 335 int npages, ret;
310 336
311 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 337 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
312 dma_addr = dma_addr & PAGE_MASK; 338 dma_addr = dma_addr & PAGE_MASK;
313 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, 339 ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
314 ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) { 340 ZPCI_PTE_INVALID);
341 if (ret) {
315 zpci_err("unmap error:\n"); 342 zpci_err("unmap error:\n");
316 zpci_err_hex(&dma_addr, sizeof(dma_addr)); 343 zpci_err_dma(ret, dma_addr);
344 return;
317 } 345 }
318 346
319 atomic64_add(npages, &zdev->unmapped_pages); 347 atomic64_add(npages, &zdev->unmapped_pages);
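[editor's note] The dma_update_trans() rework above walks the translation table per page and, when the refresh of a valid mapping fails, replays the loop backwards to invalidate the entries it had already set. The shape of that rollback, reduced to stubs (map_one/unmap_one are hypothetical helpers, not the zPCI ones):

	#include <stdio.h>

	static int map_one(int i)	{ return i == 3 ? -1 : 0; }	/* fails at 3 */
	static void unmap_one(int i)	{ printf("undo %d\n", i); }

	/* Map nr items; on failure, undo the entries already mapped. */
	static int map_all(int nr)
	{
		int i, rc = 0;

		for (i = 0; i < nr; i++) {
			rc = map_one(i);
			if (rc)
				break;
		}
		if (rc)
			while (i-- > 0)
				unmap_one(i);	/* prints undo 2, 1, 0 */
		return rc;
	}

	int main(void)
	{
		return map_all(5) ? 1 : 0;
	}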
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 0033e96c3f09..9011a88353de 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,6 @@
23#include <stdarg.h> 23#include <stdarg.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/edd.h> 25#include <linux/edd.h>
26#include <asm/boot.h>
27#include <asm/setup.h> 26#include <asm/setup.h>
28#include "bitops.h" 27#include "bitops.h"
29#include "ctype.h" 28#include "ctype.h"
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
index aa8a96b052e3..95c7a818c0ed 100644
--- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c
@@ -19,6 +19,8 @@
19#include "video.h" 19#include "video.h"
20#include "vesa.h" 20#include "vesa.h"
21 21
22#include <uapi/asm/boot.h>
23
22/* 24/*
23 * Common variables 25 * Common variables
24 */ 26 */
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 05111bb8d018..77780e386e9b 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -13,6 +13,8 @@
13 * Select video mode 13 * Select video mode
14 */ 14 */
15 15
16#include <uapi/asm/boot.h>
17
16#include "boot.h" 18#include "boot.h"
17#include "video.h" 19#include "video.h"
18#include "vesa.h" 20#include "vesa.h"
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 53616ca03244..a55697d19824 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -509,6 +509,17 @@ END(irq_entries_start)
509 * tracking that we're in kernel mode. 509 * tracking that we're in kernel mode.
510 */ 510 */
511 SWAPGS 511 SWAPGS
512
513 /*
514 * We need to tell lockdep that IRQs are off. We can't do this until
515 * we fix gsbase, and we should do it before enter_from_user_mode
 516	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
517 * the simplest way to handle it is to just call it twice if
518 * we enter from user mode. There's no reason to optimize this since
519 * TRACE_IRQS_OFF is a no-op if lockdep is off.
520 */
521 TRACE_IRQS_OFF
522
512#ifdef CONFIG_CONTEXT_TRACKING 523#ifdef CONFIG_CONTEXT_TRACKING
513 call enter_from_user_mode 524 call enter_from_user_mode
514#endif 525#endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
1049 SWAPGS 1060 SWAPGS
1050 1061
1051.Lerror_entry_from_usermode_after_swapgs: 1062.Lerror_entry_from_usermode_after_swapgs:
1063 /*
1064 * We need to tell lockdep that IRQs are off. We can't do this until
1065 * we fix gsbase, and we should do it before enter_from_user_mode
1066 * (which can take locks).
1067 */
1068 TRACE_IRQS_OFF
1052#ifdef CONFIG_CONTEXT_TRACKING 1069#ifdef CONFIG_CONTEXT_TRACKING
1053 call enter_from_user_mode 1070 call enter_from_user_mode
1054#endif 1071#endif
1072 ret
1055 1073
1056.Lerror_entry_done: 1074.Lerror_entry_done:
1057
1058 TRACE_IRQS_OFF 1075 TRACE_IRQS_OFF
1059 ret 1076 ret
1060 1077
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9f3905697f12..690b4027e17c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -35,7 +35,7 @@
35#define MSR_IA32_PERFCTR0 0x000000c1 35#define MSR_IA32_PERFCTR0 0x000000c1
36#define MSR_IA32_PERFCTR1 0x000000c2 36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd 37#define MSR_FSB_FREQ 0x000000cd
38#define MSR_NHM_PLATFORM_INFO 0x000000ce 38#define MSR_PLATFORM_INFO 0x000000ce
39 39
40#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 40#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
41#define NHM_C3_AUTO_DEMOTE (1UL << 25) 41#define NHM_C3_AUTO_DEMOTE (1UL << 25)
@@ -44,7 +44,6 @@
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 46
47#define MSR_PLATFORM_INFO 0x000000ce
48#define MSR_MTRRcap 0x000000fe 47#define MSR_MTRRcap 0x000000fe
49#define MSR_IA32_BBL_CR_CTL 0x00000119 48#define MSR_IA32_BBL_CR_CTL 0x00000119
50#define MSR_IA32_BBL_CR_CTL3 0x0000011e 49#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index c5b7fb2774d0..cc071c6f7d4d 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -9,19 +9,21 @@
9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
10#define PAGE_MASK (~(PAGE_SIZE-1)) 10#define PAGE_MASK (~(PAGE_SIZE-1))
11 11
12#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
13#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
14
15#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
16#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
17
12#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) 18#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
13#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 19#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
14 20
15/* Cast PAGE_MASK to a signed type so that it is sign-extended if 21/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
16 virtual addresses are 32-bits but physical addresses are larger 22 virtual addresses are 32-bits but physical addresses are larger
17 (ie, 32-bit PAE). */ 23 (ie, 32-bit PAE). */
18#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 24#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
19 25#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
20#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 26#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
21#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
22
23#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
24#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
25 27
26#define HPAGE_SHIFT PMD_SHIFT 28#define HPAGE_SHIFT PMD_SHIFT
27#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) 29#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
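[editor's note] The new PHYSICAL_PMD_PAGE_MASK and PHYSICAL_PUD_PAGE_MASK reuse the signed-cast trick documented above PHYSICAL_PAGE_MASK: widening the mask through a signed type sign-extends it, so the bits above bit 31 survive when virtual addresses are 32-bit but physical addresses are wider (PAE). A self-contained demonstration:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t page_mask = ~(uint32_t)(4096 - 1);	/* 0xfffff000 */
		uint64_t phys_mask = (1ULL << 36) - 1;		/* 36-bit PAE */

		/* Zero-extending the unsigned mask drops the PAE bits: */
		printf("%#llx\n", (unsigned long long)
		       ((uint64_t)page_mask & phys_mask));	/* 0xfffff000 */
		/* Sign-extending through int32_t keeps them set: */
		printf("%#llx\n", (unsigned long long)
		       ((uint64_t)(int32_t)page_mask & phys_mask)); /* 0xffffff000 */
		return 0;
	}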
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index dd5b0aa9dd2f..a471cadb9630 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
279static inline pudval_t pud_pfn_mask(pud_t pud) 279static inline pudval_t pud_pfn_mask(pud_t pud)
280{ 280{
281 if (native_pud_val(pud) & _PAGE_PSE) 281 if (native_pud_val(pud) & _PAGE_PSE)
282 return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK; 282 return PHYSICAL_PUD_PAGE_MASK;
283 else 283 else
284 return PTE_PFN_MASK; 284 return PTE_PFN_MASK;
285} 285}
286 286
287static inline pudval_t pud_flags_mask(pud_t pud) 287static inline pudval_t pud_flags_mask(pud_t pud)
288{ 288{
289 if (native_pud_val(pud) & _PAGE_PSE) 289 return ~pud_pfn_mask(pud);
290 return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
291 else
292 return ~PTE_PFN_MASK;
293} 290}
294 291
295static inline pudval_t pud_flags(pud_t pud) 292static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
300static inline pmdval_t pmd_pfn_mask(pmd_t pmd) 297static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
301{ 298{
302 if (native_pmd_val(pmd) & _PAGE_PSE) 299 if (native_pmd_val(pmd) & _PAGE_PSE)
303 return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK; 300 return PHYSICAL_PMD_PAGE_MASK;
304 else 301 else
305 return PTE_PFN_MASK; 302 return PTE_PFN_MASK;
306} 303}
307 304
308static inline pmdval_t pmd_flags_mask(pmd_t pmd) 305static inline pmdval_t pmd_flags_mask(pmd_t pmd)
309{ 306{
310 if (native_pmd_val(pmd) & _PAGE_PSE) 307 return ~pmd_pfn_mask(pmd);
311 return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
312 else
313 return ~PTE_PFN_MASK;
314} 308}
315 309
316static inline pmdval_t pmd_flags(pmd_t pmd) 310static inline pmdval_t pmd_flags(pmd_t pmd)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 48d34d28f5a6..cd0fc0cc78bc 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -1,7 +1,6 @@
1#ifndef _ASM_X86_PLATFORM_H 1#ifndef _ASM_X86_PLATFORM_H
2#define _ASM_X86_PLATFORM_H 2#define _ASM_X86_PLATFORM_H
3 3
4#include <asm/pgtable_types.h>
5#include <asm/bootparam.h> 4#include <asm/bootparam.h>
6 5
7struct mpc_bus; 6struct mpc_bus;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c2b7522cbf35 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
273 273
274static __always_inline void setup_smap(struct cpuinfo_x86 *c) 274static __always_inline void setup_smap(struct cpuinfo_x86 *c)
275{ 275{
276 unsigned long eflags; 276 unsigned long eflags = native_save_fl();
277 277
278 /* This should have been cleared long ago */ 278 /* This should have been cleared long ago */
279 raw_local_save_flags(eflags);
280 BUG_ON(eflags & X86_EFLAGS_AC); 279 BUG_ON(eflags & X86_EFLAGS_AC);
281 280
282 if (cpu_has(c, X86_FEATURE_SMAP)) { 281 if (cpu_has(c, X86_FEATURE_SMAP)) {
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..b3e94ef461fd 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -698,3 +698,4 @@ int __init microcode_init(void)
698 return error; 698 return error;
699 699
700} 700}
701late_initcall(microcode_init);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index db9a675e751b..bca14c899137 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -547,6 +547,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
547 INTEL_CHV_IDS(&chv_stolen_funcs), 547 INTEL_CHV_IDS(&chv_stolen_funcs),
548 INTEL_SKL_IDS(&gen9_stolen_funcs), 548 INTEL_SKL_IDS(&gen9_stolen_funcs),
549 INTEL_BXT_IDS(&gen9_stolen_funcs), 549 INTEL_BXT_IDS(&gen9_stolen_funcs),
550 INTEL_KBL_IDS(&gen9_stolen_funcs),
550}; 551};
551 552
552static void __init intel_graphics_stolen(int num, int slot, int func) 553static void __init intel_graphics_stolen(int num, int slot, int func)
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index ef29b742cea7..31c6a60505e6 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
385 */ 385 */
386void fpu__init_prepare_fx_sw_frame(void) 386void fpu__init_prepare_fx_sw_frame(void)
387{ 387{
388 int fsave_header_size = sizeof(struct fregs_state);
389 int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; 388 int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
390 389
391 if (config_enabled(CONFIG_X86_32))
392 size += fsave_header_size;
393
394 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; 390 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
395 fx_sw_reserved.extended_size = size; 391 fx_sw_reserved.extended_size = size;
396 fx_sw_reserved.xfeatures = xfeatures_mask; 392 fx_sw_reserved.xfeatures = xfeatures_mask;
397 fx_sw_reserved.xstate_size = xstate_size; 393 fx_sw_reserved.xstate_size = xstate_size;
398 394
399 if (config_enabled(CONFIG_IA32_EMULATION)) { 395 if (config_enabled(CONFIG_IA32_EMULATION) ||
396 config_enabled(CONFIG_X86_32)) {
397 int fsave_header_size = sizeof(struct fregs_state);
398
400 fx_sw_reserved_ia32 = fx_sw_reserved; 399 fx_sw_reserved_ia32 = fx_sw_reserved;
401 fx_sw_reserved_ia32.extended_size += fsave_header_size; 400 fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
402 } 401 }
403} 402}
404 403
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 6454f2731b56..70fc312221fc 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
694 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 694 if (!boot_cpu_has(X86_FEATURE_XSAVE))
695 return NULL; 695 return NULL;
696 696
697 xsave = &current->thread.fpu.state.xsave;
698 /* 697 /*
699 * We should not ever be requesting features that we 698 * We should not ever be requesting features that we
700 * have not enabled. Remember that pcntxt_mask is 699 * have not enabled. Remember that pcntxt_mask is
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 94ea120fa21f..87e1762e2bca 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -278,6 +278,12 @@ trace:
278 /* save_mcount_regs fills in first two parameters */ 278 /* save_mcount_regs fills in first two parameters */
279 save_mcount_regs 279 save_mcount_regs
280 280
281 /*
282 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
283 * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
284 * ip and parent ip are used and the list function is called when
285 * function tracing is enabled.
286 */
281 call *ftrace_trace_function 287 call *ftrace_trace_function
282 288
283 restore_mcount_regs 289 restore_mcount_regs
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 4f00b63d7ff3..14415aff1813 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -4,10 +4,22 @@
4 */ 4 */
5#include <linux/platform_device.h> 5#include <linux/platform_device.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/ioport.h>
8
9static int found(u64 start, u64 end, void *data)
10{
11 return 1;
12}
7 13
8static __init int register_e820_pmem(void) 14static __init int register_e820_pmem(void)
9{ 15{
16 char *pmem = "Persistent Memory (legacy)";
10 struct platform_device *pdev; 17 struct platform_device *pdev;
18 int rc;
19
20 rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
21 if (rc <= 0)
22 return 0;
11 23
12 /* 24 /*
13 * See drivers/nvdimm/e820.c for the implementation, this is 25 * See drivers/nvdimm/e820.c for the implementation, this is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 29db25f9a745..d2bbe343fda7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
1250 if (efi_enabled(EFI_BOOT)) 1250 if (efi_enabled(EFI_BOOT))
1251 efi_apply_memmap_quirks(); 1251 efi_apply_memmap_quirks();
1252#endif 1252#endif
1253
1254 microcode_init();
1255} 1253}
1256 1254
1257#ifdef CONFIG_X86_32 1255#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index b7ffb7c00075..cb6282c3638f 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
690 signal_setup_done(failed, ksig, stepping); 690 signal_setup_done(failed, ksig, stepping);
691} 691}
692 692
693#ifdef CONFIG_X86_32 693static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
694#define NR_restart_syscall __NR_restart_syscall 694{
695#else /* !CONFIG_X86_32 */ 695#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
696#define NR_restart_syscall \ 696 return __NR_restart_syscall;
697 test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall 697#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
698#endif /* CONFIG_X86_32 */ 698 return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
699 __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
700#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
701}
699 702
700/* 703/*
701 * Note that 'init' is a special process: it doesn't get signals it doesn't 704 * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
724 break; 727 break;
725 728
726 case -ERESTART_RESTARTBLOCK: 729 case -ERESTART_RESTARTBLOCK:
727 regs->ax = NR_restart_syscall; 730 regs->ax = get_nr_restart_syscall(regs);
728 regs->ip -= 2; 731 regs->ip -= 2;
729 break; 732 break;
730 } 733 }
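[editor's note] On 64-bit the restart number must also carry the x32 marker, otherwise restarting an interrupted x32 syscall would re-enter it as a plain 64-bit call. A sketch of the selection with the usual x86 uapi constants written out as plain defines (values believed correct, but not pulled from the real headers here):

	#include <stdbool.h>

	#define NR_restart_syscall	219		/* __NR_restart_syscall, 64-bit */
	#define NR_ia32_restart_syscall	0		/* __NR_ia32_restart_syscall */
	#define X32_SYSCALL_BIT		0x40000000UL	/* __X32_SYSCALL_BIT */

	static unsigned long restart_nr(bool is_ia32_task, unsigned long orig_ax)
	{
		if (is_ia32_task)
			return NR_ia32_restart_syscall;
		/* Keep the x32 marker of the interrupted syscall, if set. */
		return NR_restart_syscall | (orig_ax & X32_SYSCALL_BIT);
	}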
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 892ee2e5ecbc..fbabe4fcc7fb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
509 */ 509 */
510#define UDELAY_10MS_DEFAULT 10000 510#define UDELAY_10MS_DEFAULT 10000
511 511
512static unsigned int init_udelay = INT_MAX; 512static unsigned int init_udelay = UINT_MAX;
513 513
514static int __init cpu_init_udelay(char *str) 514static int __init cpu_init_udelay(char *str)
515{ 515{
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
522static void __init smp_quirk_init_udelay(void) 522static void __init smp_quirk_init_udelay(void)
523{ 523{
524 /* if cmdline changed it from default, leave it alone */ 524 /* if cmdline changed it from default, leave it alone */
525 if (init_udelay != INT_MAX) 525 if (init_udelay != UINT_MAX)
526 return; 526 return;
527 527
528 /* if modern processor, use no delay */ 528 /* if modern processor, use no delay */
529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || 529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) 530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
531 init_udelay = 0; 531 init_udelay = 0;
532 532 return;
533 }
533 /* else, use legacy delay */ 534 /* else, use legacy delay */
534 init_udelay = UDELAY_10MS_DEFAULT; 535 init_udelay = UDELAY_10MS_DEFAULT;
535} 536}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 87acc5221740..af823a388c19 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
7394 7394
7395 switch (type) { 7395 switch (type) {
7396 case VMX_VPID_EXTENT_ALL_CONTEXT: 7396 case VMX_VPID_EXTENT_ALL_CONTEXT:
7397 if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
7398 nested_vmx_failValid(vcpu,
7399 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7400 return 1;
7401 }
7402 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); 7397 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
7403 nested_vmx_succeed(vcpu); 7398 nested_vmx_succeed(vcpu);
7404 break; 7399 break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00462bd63129..eed32283d22c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2763 return 0; 2763 return 0;
2764} 2764}
2765 2765
2766static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
2767{
2768 return (!lapic_in_kernel(vcpu) ||
2769 kvm_apic_accept_pic_intr(vcpu));
2770}
2771
2772/*
2773 * if userspace requested an interrupt window, check that the
2774 * interrupt window is open.
2775 *
2776 * No need to exit to userspace if we already have an interrupt queued.
2777 */
2778static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
2779{
2780 return kvm_arch_interrupt_allowed(vcpu) &&
2781 !kvm_cpu_has_interrupt(vcpu) &&
2782 !kvm_event_needs_reinjection(vcpu) &&
2783 kvm_cpu_accept_dm_intr(vcpu);
2784}
2785
2766static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 2786static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2767 struct kvm_interrupt *irq) 2787 struct kvm_interrupt *irq)
2768{ 2788{
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2786 return -EEXIST; 2806 return -EEXIST;
2787 2807
2788 vcpu->arch.pending_external_vector = irq->irq; 2808 vcpu->arch.pending_external_vector = irq->irq;
2809 kvm_make_request(KVM_REQ_EVENT, vcpu);
2789 return 0; 2810 return 0;
2790} 2811}
2791 2812
@@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5910 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 5931 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5911} 5932}
5912 5933
5913/*
5914 * Check if userspace requested an interrupt window, and that the
5915 * interrupt window is open.
5916 *
5917 * No need to exit to userspace if we already have an interrupt queued.
5918 */
5919static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 5934static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5920{ 5935{
5921 if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm)) 5936 return vcpu->run->request_interrupt_window &&
5922 return false; 5937 likely(!pic_in_kernel(vcpu->kvm));
5923
5924 if (kvm_cpu_has_interrupt(vcpu))
5925 return false;
5926
5927 return (irqchip_split(vcpu->kvm)
5928 ? kvm_apic_accept_pic_intr(vcpu)
5929 : kvm_arch_interrupt_allowed(vcpu));
5930} 5938}
5931 5939
5932static void post_kvm_run_save(struct kvm_vcpu *vcpu) 5940static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5937 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; 5945 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
5938 kvm_run->cr8 = kvm_get_cr8(vcpu); 5946 kvm_run->cr8 = kvm_get_cr8(vcpu);
5939 kvm_run->apic_base = kvm_get_apic_base(vcpu); 5947 kvm_run->apic_base = kvm_get_apic_base(vcpu);
5940 if (!irqchip_in_kernel(vcpu->kvm)) 5948 kvm_run->ready_for_interrupt_injection =
5941 kvm_run->ready_for_interrupt_injection = 5949 pic_in_kernel(vcpu->kvm) ||
5942 kvm_arch_interrupt_allowed(vcpu) && 5950 kvm_vcpu_ready_for_interrupt_injection(vcpu);
5943 !kvm_cpu_has_interrupt(vcpu) &&
5944 !kvm_event_needs_reinjection(vcpu);
5945 else if (!pic_in_kernel(vcpu->kvm))
5946 kvm_run->ready_for_interrupt_injection =
5947 kvm_apic_accept_pic_intr(vcpu) &&
5948 !kvm_cpu_has_interrupt(vcpu);
5949 else
5950 kvm_run->ready_for_interrupt_injection = 1;
5951} 5951}
5952 5952
5953static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5953static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
6360static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 6360static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6361{ 6361{
6362 int r; 6362 int r;
6363 bool req_int_win = !lapic_in_kernel(vcpu) && 6363 bool req_int_win =
6364 vcpu->run->request_interrupt_window; 6364 dm_request_for_irq_injection(vcpu) &&
6365 kvm_cpu_accept_dm_intr(vcpu);
6366
6365 bool req_immediate_exit = false; 6367 bool req_immediate_exit = false;
6366 6368
6367 if (vcpu->requests) { 6369 if (vcpu->requests) {
@@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
6663 if (kvm_cpu_has_pending_timer(vcpu)) 6665 if (kvm_cpu_has_pending_timer(vcpu))
6664 kvm_inject_pending_timer_irqs(vcpu); 6666 kvm_inject_pending_timer_irqs(vcpu);
6665 6667
6666 if (dm_request_for_irq_injection(vcpu)) { 6668 if (dm_request_for_irq_injection(vcpu) &&
6669 kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
6667 r = 0; 6670 r = 0;
6668 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 6671 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
6669 ++vcpu->stat.request_irq_exits; 6672 ++vcpu->stat.request_irq_exits;
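[editor's note] The x86.c changes split the old interrupt-window check into two predicates — did userspace ask for window exits at all, and could the vcpu actually take an interrupt from userspace right now — and require both before exiting with KVM_EXIT_IRQ_WINDOW_OPEN. A boolean sketch of that structure (struct vcpu_state is illustrative, not the kernel's):

	#include <stdbool.h>

	struct vcpu_state {
		bool request_interrupt_window;	/* set by userspace in kvm_run */
		bool pic_in_kernel;
		bool interrupt_allowed;
		bool has_interrupt;
		bool needs_reinjection;
		bool accepts_dm_intr;	/* no in-kernel LAPIC, or PIC intr ok */
	};

	static bool dm_request_for_irq_injection(const struct vcpu_state *v)
	{
		return v->request_interrupt_window && !v->pic_in_kernel;
	}

	static bool ready_for_interrupt_injection(const struct vcpu_state *v)
	{
		return v->interrupt_allowed && !v->has_interrupt &&
		       !v->needs_reinjection && v->accepts_dm_intr;
	}

	/* Exit with KVM_EXIT_IRQ_WINDOW_OPEN only when both hold. */
	static bool should_exit_irq_window(const struct vcpu_state *v)
	{
		return dm_request_for_irq_injection(v) &&
		       ready_for_interrupt_injection(v);
	}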
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b0ae85f90f10..b2fd67da1701 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
101 switch (type) { 101 switch (type) {
102 case REG_TYPE_RM: 102 case REG_TYPE_RM:
103 regno = X86_MODRM_RM(insn->modrm.value); 103 regno = X86_MODRM_RM(insn->modrm.value);
104 if (X86_REX_B(insn->rex_prefix.value) == 1) 104 if (X86_REX_B(insn->rex_prefix.value))
105 regno += 8; 105 regno += 8;
106 break; 106 break;
107 107
108 case REG_TYPE_INDEX: 108 case REG_TYPE_INDEX:
109 regno = X86_SIB_INDEX(insn->sib.value); 109 regno = X86_SIB_INDEX(insn->sib.value);
110 if (X86_REX_X(insn->rex_prefix.value) == 1) 110 if (X86_REX_X(insn->rex_prefix.value))
111 regno += 8; 111 regno += 8;
112 break; 112 break;
113 113
114 case REG_TYPE_BASE: 114 case REG_TYPE_BASE:
115 regno = X86_SIB_BASE(insn->sib.value); 115 regno = X86_SIB_BASE(insn->sib.value);
116 if (X86_REX_B(insn->rex_prefix.value) == 1) 116 if (X86_REX_B(insn->rex_prefix.value))
117 regno += 8; 117 regno += 8;
118 break; 118 break;
119 119
@@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
586} 586}
587 587
588/* 588/*
589 * We only want to do a 4-byte get_user() on 32-bit. Otherwise,
590 * we might run off the end of the bounds table if we are on
591 * a 64-bit kernel and try to get 8 bytes.
592 */
593int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
594 long __user *bd_entry_ptr)
595{
596 u32 bd_entry_32;
597 int ret;
598
599 if (is_64bit_mm(mm))
600 return get_user(*bd_entry_ret, bd_entry_ptr);
601
602 /*
603 * Note that get_user() uses the type of the *pointer* to
604 * establish the size of the get, not the destination.
605 */
606 ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
607 *bd_entry_ret = bd_entry_32;
608 return ret;
609}
610
611/*
589 * Get the base of bounds tables pointed by specific bounds 612 * Get the base of bounds tables pointed by specific bounds
590 * directory entry. 613 * directory entry.
591 */ 614 */
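[editor's note] get_user() derives its access width from the pointer type, so reading a 32-bit mm's 4-byte directory entry through a long pointer on a 64-bit kernel would fetch 8 bytes and could run off the end of the table — hence the explicit u32 path above. A userspace sketch of the same width dispatch (memcpy stands in for get_user):

	#include <stdint.h>
	#include <string.h>
	#include <stdbool.h>

	/* Copy one directory entry of the mm's width out of the table. */
	static void read_bd_entry(const void *entry, uint64_t *ret, bool is_64bit)
	{
		if (is_64bit) {
			memcpy(ret, entry, sizeof(uint64_t));	/* 8-byte entry */
		} else {
			uint32_t v;

			memcpy(&v, entry, sizeof(v));		/* 4-byte entry */
			*ret = v;				/* zero-extend */
		}
	}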
@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
605 int need_write = 0; 628 int need_write = 0;
606 629
607 pagefault_disable(); 630 pagefault_disable();
608 ret = get_user(bd_entry, bd_entry_ptr); 631 ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
609 pagefault_enable(); 632 pagefault_enable();
610 if (!ret) 633 if (!ret)
611 break; 634 break;
@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
700 */ 723 */
701static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) 724static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
702{ 725{
703 unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits); 726 unsigned long long virt_space;
704 if (is_64bit_mm(mm)) 727 unsigned long long GB = (1ULL << 30);
705 return virt_space / MPX_BD_NR_ENTRIES_64; 728
706 else 729 /*
707 return virt_space / MPX_BD_NR_ENTRIES_32; 730 * This covers 32-bit emulation as well as 32-bit kernels
 731	 * running on 64-bit hardware.
732 */
733 if (!is_64bit_mm(mm))
734 return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
735
736 /*
737 * 'x86_virt_bits' returns what the hardware is capable
 738	 * of, and returns the full >32-bit address space when
739 * running 32-bit kernels on 64-bit hardware.
740 */
741 virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
742 return virt_space / MPX_BD_NR_ENTRIES_64;
708} 743}
709 744
710/* 745/*
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 7bcf06a7cd12..6eb3c8af96e2 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
50 if (!found) 50 if (!found)
51 pci_add_resource(resources, &info->busn); 51 pci_add_resource(resources, &info->busn);
52 52
53 list_for_each_entry(root_res, &info->resources, list) { 53 list_for_each_entry(root_res, &info->resources, list)
54 struct resource *res; 54 pci_add_resource(resources, &root_res->res);
55 struct resource *root;
56 55
57 res = &root_res->res;
58 pci_add_resource(resources, res);
59 if (res->flags & IORESOURCE_IO)
60 root = &ioport_resource;
61 else
62 root = &iomem_resource;
63 insert_resource(root, res);
64 }
65 return; 56 return;
66 57
67default_resources: 58default_resources:
diff --git a/block/blk-core.c b/block/blk-core.c
index 5131993b23a1..a0af4043dda2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
2114EXPORT_SYMBOL(submit_bio); 2114EXPORT_SYMBOL(submit_bio);
2115 2115
2116/** 2116/**
2117 * blk_rq_check_limits - Helper function to check a request for the queue limit 2117 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 2118 * for the new queue limits
2118 * @q: the queue 2119 * @q: the queue
2119 * @rq: the request being checked 2120 * @rq: the request being checked
2120 * 2121 *
@@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio);
2125 * after it is inserted to @q, it should be checked against @q before 2126 * after it is inserted to @q, it should be checked against @q before
2126 * the insertion using this generic function. 2127 * the insertion using this generic function.
2127 * 2128 *
2128 * This function should also be useful for request stacking drivers
2129 * in some cases below, so export this function.
2130 * Request stacking drivers like request-based dm may change the queue 2129 * Request stacking drivers like request-based dm may change the queue
2131 * limits while requests are in the queue (e.g. dm's table swapping). 2130 * limits when retrying requests on other queues. Those requests need
2132 * Such request stacking drivers should check those requests against 2131 * to be checked against the new queue limits again during dispatch.
2133 * the new queue limits again when they dispatch those requests,
2134 * although such checkings are also done against the old queue limits
2135 * when submitting requests.
2136 */ 2132 */
2137int blk_rq_check_limits(struct request_queue *q, struct request *rq) 2133static int blk_cloned_rq_check_limits(struct request_queue *q,
2134 struct request *rq)
2138{ 2135{
2139 if (!rq_mergeable(rq))
2140 return 0;
2141
2142 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 2136 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
2143 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2137 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2144 return -EIO; 2138 return -EIO;
@@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
2158 2152
2159 return 0; 2153 return 0;
2160} 2154}
2161EXPORT_SYMBOL_GPL(blk_rq_check_limits);
2162 2155
2163/** 2156/**
2164 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2157 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
@@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2170 unsigned long flags; 2163 unsigned long flags;
2171 int where = ELEVATOR_INSERT_BACK; 2164 int where = ELEVATOR_INSERT_BACK;
2172 2165
2173 if (blk_rq_check_limits(q, rq)) 2166 if (blk_cloned_rq_check_limits(q, rq))
2174 return -EIO; 2167 return -EIO;
2175 2168
2176 if (rq->rq_disk && 2169 if (rq->rq_disk &&
diff --git a/block/blk-merge.c b/block/blk-merge.c
index de5716d8e525..e01405a3e8b3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
+	unsigned front_seg_size = bio->bi_seg_front_size;
+	bool do_split = true;
+	struct bio *new = NULL;
 
 	bio_for_each_segment(bv, bio, iter) {
 		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
@@ -98,8 +101,11 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
-			bvprvp = &bv;
+			bvprvp = &bvprv;
 			sectors += bv.bv_len >> 9;
+
+			if (nsegs == 1 && seg_size > front_seg_size)
+				front_seg_size = seg_size;
 			continue;
 		}
 new_segment:
@@ -108,16 +114,29 @@ new_segment:
 
 		nsegs++;
 		bvprv = bv;
-		bvprvp = &bv;
+		bvprvp = &bvprv;
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
+
+		if (nsegs == 1 && seg_size > front_seg_size)
+			front_seg_size = seg_size;
 	}
 
-	*segs = nsegs;
-	return NULL;
+	do_split = false;
 split:
 	*segs = nsegs;
-	return bio_split(bio, sectors, GFP_NOIO, bs);
+
+	if (do_split) {
+		new = bio_split(bio, sectors, GFP_NOIO, bs);
+		if (new)
+			bio = new;
+	}
+
+	bio->bi_seg_front_size = front_seg_size;
+	if (seg_size > bio->bi_seg_back_size)
+		bio->bi_seg_back_size = seg_size;
+
+	return do_split ? new : NULL;
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -412,6 +431,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	if (sg)
 		sg_mark_end(sg);
 
+	/*
+	 * Something must have been wrong if the computed number of
+	 * segments is bigger than the number of req's physical segments
+	 */
+	WARN_ON(nsegs > rq->nr_phys_segments);
+
 	return nsegs;
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
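The bvprvp change above fixes a classic dangling-pointer bug: `&bv` pointed at the loop's iteration variable, which is overwritten on every pass, so the "previous" bvec silently tracked the current one. A small userspace demonstration of the same pattern (not kernel code):

#include <stdio.h>

int main(void)
{
	int vals[] = { 10, 20, 30 };
	int cur, prev_copy = 0, *prevp = NULL;

	for (int i = 0; i < 3; i++) {
		cur = vals[i];
		if (prevp)
			/* had we stored &cur, *prevp would always equal cur;
			 * pointing at the saved copy preserves the previous
			 * element, like bvprvp = &bvprv above */
			printf("prev=%d cur=%d\n", *prevp, cur);
		prev_copy = cur;
		prevp = &prev_copy;
	}
	return 0;
}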
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ae09de62f19..6d6f8feb48c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_bio_to_request(rq, bio);
 
 		/*
-		 * we do limited pluging. If bio can be merged, do merge.
+		 * We do limited plugging. If the bio can be merged, do that.
 		 * Otherwise the existing request in the plug list will be
 		 * issued. So the plug list will have one request at most
 		 */
 		if (plug) {
 			/*
 			 * The plug list might get flushed before this. If that
-			 * happens, same_queue_rq is invalid and plug list is empty
-			 **/
+			 * happens, same_queue_rq is invalid and plug list is
+			 * empty
+			 */
 			if (same_queue_rq && !list_empty(&plug->mq_list)) {
 				old_rq = same_queue_rq;
 				list_del_init(&old_rq->queuelist);
@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_bio_to_request(rq, bio);
 		if (!request_count)
 			trace_block_plug(q);
-		else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+
+		blk_mq_put_ctx(data.ctx);
+
+		if (request_count >= BLK_MAX_REQUEST_COUNT) {
 			blk_flush_plug_list(plug, false);
 			trace_block_plug(q);
 		}
+
 		list_add_tail(&rq->queuelist, &plug->mq_list);
-		blk_mq_put_ctx(data.ctx);
 		return cookie;
 	}
 
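The reordering appears to release the per-CPU software-queue context before blk_flush_plug_list() can dispatch requests, rather than holding it across the flush. A hedged userspace sketch of the reordered control flow; every name here is a stand-in, not blk-mq API:

#include <stddef.h>
#include <stdio.h>

struct req  { struct req *next; };
struct plug { struct req *head; };

#define MAX_PLUGGED 16

static void put_cpu_ctx(void *ctx) { (void)ctx; }	/* stand-in */

static void flush_plug(struct plug *p)
{
	p->head = NULL;		/* stand-in: would dispatch queued requests */
}

static void plug_request(void *ctx, struct plug *plug, struct req *rq,
			 unsigned int count)
{
	/* release the context first: the flush below may block */
	put_cpu_ctx(ctx);

	if (count >= MAX_PLUGGED)
		flush_plug(plug);

	rq->next = plug->head;	/* queue the new request on the plug */
	plug->head = rq;
}

int main(void)
{
	struct plug plug = { NULL };
	struct req rq = { NULL };

	plug_request(NULL, &plug, &rq, 0);
	printf("plugged: %d\n", plug.head == &rq);
	return 0;
}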
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7d8f129a1516..dd4973583978 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+		BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
+	lim->max_dev_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
  * @max_hw_sectors: max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * the device driver based upon the capabilities of the I/O
  * controller.
  *
+ * max_dev_sectors is a hard limit imposed by the storage device for
+ * READ/WRITE requests. It is set by the disk driver.
+ *
  * max_sectors is a soft limit imposed by the block layer for
  * filesystem type requests. This value can be overridden on a
  * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  * The soft limit can not exceed max_hw_sectors.
  **/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
+	struct queue_limits *limits = &q->limits;
+	unsigned int max_sectors;
+
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
 	}
 
 	limits->max_hw_sectors = max_hw_sectors;
-	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
-				    BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- *    See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+	limits->max_sectors = max_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_same_sectors = min(t->max_write_same_sectors,
 					b->max_write_same_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
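With max_dev_sectors in the picture, the soft limit becomes the smallest non-zero value among the controller limit, the device limit, and the block layer default. A userspace sketch of that clamp (the constants are illustrative):

#include <stdio.h>

#define BLK_DEF_MAX_SECTORS 2560u

/* smallest of a and b, treating zero as "no limit set" */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_hw = 65535, max_dev = 4096;
	unsigned int max = min_not_zero(max_hw, max_dev);

	if (max > BLK_DEF_MAX_SECTORS)
		max = BLK_DEF_MAX_SECTORS;
	printf("max_sectors = %u\n", max);	/* 2560 */
	return 0;
}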
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 565b8dac5782..e140cc487ce1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
+	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+					 q->limits.max_dev_sectors >> 1);
+
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 246dfb16c3d9..aa40aa93381b 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
 {
 	if (blk_mark_rq_complete(req))
 		return;
-	blk_delete_timer(req);
-	if (req->q->mq_ops)
+
+	if (req->q->mq_ops) {
 		blk_mq_rq_timed_out(req, false);
-	else
+	} else {
+		blk_delete_timer(req);
 		blk_rq_timed_out(req);
+	}
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
diff --git a/block/blk.h b/block/blk.h
index da722eb786df..c43926d3d74d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
 			    unsigned int nr_bytes, unsigned int bidi_bytes);
-int blk_queue_enter(struct request_queue *q, gfp_t gfp);
-void blk_queue_exit(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
 static inline void blk_queue_enter_live(struct request_queue *q)
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 3de89d4690f3..a163c487cf38 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
 static int noop_dispatch(struct request_queue *q, int force)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
+	struct request *rq;
 
-	if (!list_empty(&nd->queue)) {
-		struct request *rq;
-		rq = list_entry(nd->queue.next, struct request, queuelist);
+	rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
+	if (rq) {
 		list_del_init(&rq->queuelist);
 		elv_dispatch_sort(q, rq);
 		return 1;
@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)
 
 	if (rq->queuelist.prev == &nd->queue)
 		return NULL;
-	return list_entry(rq->queuelist.prev, struct request, queuelist);
+	return list_prev_entry(rq, queuelist);
 }
 
 static struct request *
@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)
 
 	if (rq->queuelist.next == &nd->queue)
 		return NULL;
-	return list_entry(rq->queuelist.next, struct request, queuelist);
+	return list_next_entry(rq, queuelist);
 }
 
 static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
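The noop scheduler conversion is purely a readability change: open-coded list_entry() pointer arithmetic becomes the named list helpers. A userspace sketch with simplified re-implementations (these are not the <linux/list.h> versions):

#include <stdio.h>
#include <stddef.h>

struct node { struct node *prev, *next; int val; };

/* simplified stand-ins for the kernel helpers used above */
#define list_next_entry(n)	((n)->next)
#define list_prev_entry(n)	((n)->prev)
#define list_first_or_null(head) \
	((head)->next != (head) ? (head)->next : NULL)

int main(void)
{
	struct node head, a = { .val = 1 }, b = { .val = 2 };

	/* circular doubly-linked list: head <-> a <-> b <-> head */
	head.next = &a; a.prev = &head; a.next = &b;
	b.prev = &a; b.next = &head; head.prev = &b;

	struct node *first = list_first_or_null(&head);
	printf("first=%d next=%d\n", first->val,
	       list_next_entry(first)->val);	/* first=1 next=2 */
	return 0;
}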
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 3b030157ec85..746935a5973c 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 	struct hd_struct *part;
 	int res;
 
-	if (bdev->bd_part_count)
+	if (bdev->bd_part_count || bdev->bd_super)
 		return -EBUSY;
 	res = invalidate_partition(disk, 0);
 	if (res)
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index c2c48ec64b27..621317ac4d59 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
 	Sector sect;
 	unsigned char *data;
 	int slot, blocks_in_map;
-	unsigned secsize;
+	unsigned secsize, datasize, partoffset;
 #ifdef CONFIG_PPC_PMAC
 	int found_root = 0;
 	int found_root_goodness = 0;
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
 	}
 	secsize = be16_to_cpu(md->block_size);
 	put_dev_sector(sect);
-	data = read_part_sector(state, secsize/512, &sect);
+	datasize = round_down(secsize, 512);
+	data = read_part_sector(state, datasize / 512, &sect);
 	if (!data)
 		return -1;
-	part = (struct mac_partition *) (data + secsize%512);
+	partoffset = secsize % 512;
+	if (partoffset + sizeof(*part) > datasize)
+		return -1;
+	part = (struct mac_partition *) (data + partoffset);
 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
 		put_dev_sector(sect);
 		return 0;	/* not a MacOS disk */
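The mac partition fix reads only the 512-byte-aligned portion of the block and rejects any offset that would place the partition structure past the end of the sector buffer. A userspace sketch of the bounds check, with illustrative structure sizes:

#include <stdio.h>
#include <string.h>

struct mac_part { unsigned short signature; char pad[30]; };

/* return the entry only if it fits entirely inside the buffer */
static const struct mac_part *part_at(const unsigned char *buf,
				      unsigned int bufsize,
				      unsigned int secsize)
{
	unsigned int off = secsize % 512;

	if (off + sizeof(struct mac_part) > bufsize)
		return NULL;	/* would read past the buffer end */
	return (const struct mac_part *)(buf + off);
}

int main(void)
{
	unsigned char sector[512];

	memset(sector, 0, sizeof(sector));
	printf("ok=%p\n", (void *)part_at(sector, sizeof(sector), 1024));
	printf("oob=%p\n", (void *)part_at(sector, sizeof(sector), 1000));
	return 0;
}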
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 0aa6fdfb448a..6d4d4569447e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
 
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 	}
 	finish_wait(sk_sleep(sk), &wait);
 
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	return err;
 }
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index af31a0ee4057..ca9efe17db1a 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
 
-	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
 		return -EAGAIN;
 	}
 
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
 	}
 	finish_wait(sk_sleep(sk), &wait);
 
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	return err;
 }
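Both algif conversions route the async flag updates through sk_set_bit()/sk_clear_bit() instead of poking sk->sk_socket->flags directly; upstream this accompanies moving the flags onto the socket wait-queue object, which is assumed in this simplified sketch (stand-in types, not the net core ones):

#include <stdio.h>

struct sock_wq { unsigned long flags; };
struct sock    { struct sock_wq *sk_wq; };

#define SOCKWQ_ASYNC_WAITDATA 1

static void sk_set_bit(int nr, struct sock *sk)
{
	sk->sk_wq->flags |= (1UL << nr);	/* one place owns the deref */
}

static void sk_clear_bit(int nr, struct sock *sk)
{
	sk->sk_wq->flags &= ~(1UL << nr);
}

int main(void)
{
	struct sock_wq wq = { 0 };
	struct sock sk = { &wq };

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, &sk);
	printf("flags=%lx\n", wq.flags);	/* 2 */
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, &sk);
	printf("flags=%lx\n", wq.flags);	/* 0 */
	return 0;
}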
diff --git a/drivers/Makefile b/drivers/Makefile
index 73d039156ea7..795d0ca714bf 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
 obj-$(CONFIG_FB_INTEL)		+= video/fbdev/intelfb/
 
 obj-$(CONFIG_PARPORT)		+= parport/
+obj-$(CONFIG_NVM)		+= lightnvm/
 obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)		+= nvdimm/
 obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_SCSI)		+= scsi/
-obj-$(CONFIG_NVM)		+= lightnvm/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
 obj-$(CONFIG_TARGET_CORE)	+= target/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 25dbb76c02cc..5eef4cb4f70e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
 	bool
 
 config ACPI_DEBUGGER
-	bool "In-kernel debugger (EXPERIMENTAL)"
+	bool "AML debugger interface (EXPERIMENTAL)"
 	select ACPI_DEBUG
 	help
-	  Enable in-kernel debugging facilities: statistics, internal
+	  Enable in-kernel debugging of AML facilities: statistics, internal
 	  object dump, single step control method execution.
 	  This is still under development, currently enabling this only
 	  results in the compilation of the ACPICA debugger files.
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3c083d2cc434..6730f965b379 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 
 static int register_pcc_channel(int pcc_subspace_idx)
 {
-	struct acpi_pcct_subspace *cppc_ss;
+	struct acpi_pcct_hw_reduced *cppc_ss;
 	unsigned int len;
 
 	if (pcc_subspace_idx >= 0) {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f61a7c834540..b420fb46669d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 	}
 
 err_exit:
-	if (result && q)
+	if (result)
 		acpi_ec_delete_query(q);
 	if (data)
 		*data = value;
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index f7dab53b352a..e7ed39bab97d 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_system_address *spa)
 {
+	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_spa *nfit_spa;
 
 	list_for_each_entry(nfit_spa, &prev->spas, list) {
-		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+		if (memcmp(nfit_spa->spa, spa, length) == 0) {
 			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
 			return true;
 		}
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_memory_map *memdev)
 {
+	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_memdev *nfit_memdev;
 
 	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
-		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
 			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
 			return true;
 		}
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_control_region *dcr)
 {
+	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_dcr *nfit_dcr;
 
 	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
-		if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
+		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
 			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
 			return true;
 		}
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_data_region *bdw)
 {
+	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_bdw *nfit_bdw;
 
 	list_for_each_entry(nfit_bdw, &prev->bdws, list)
-		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
 			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
 			return true;
 		}
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_interleave *idt)
 {
+	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_idt *nfit_idt;
 
 	list_for_each_entry(nfit_idt, &prev->idts, list)
-		if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
+		if (memcmp(nfit_idt->idt, idt, length) == 0) {
 			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
 			return true;
 		}
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_flush_address *flush)
 {
+	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_flush *nfit_flush;
 
 	list_for_each_entry(nfit_flush, &prev->flushes, list)
-		if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
+		if (memcmp(nfit_flush->flush, flush, length) == 0) {
 			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
 			return true;
 		}
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
+	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
 }
 static DEVICE_ATTR_RO(revision);
 
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
 
 	data = (u8 *) acpi_desc->nfit;
 	end = data + sz;
-	data += sizeof(struct acpi_table_nfit);
 	while (!IS_ERR_OR_NULL(data))
 		data = add_table(acpi_desc, &prev, data, end);
 
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
 		return PTR_ERR(acpi_desc);
 	}
 
-	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
+	/*
+	 * Save the acpi header for later and then skip it,
+	 * making nfit point to the first nfit table header.
+	 */
+	acpi_desc->acpi_header = *tbl;
+	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
+	sz -= sizeof(struct acpi_table_nfit);
 
 	/* Evaluate _FIT and override with that if present */
 	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
 	if (ACPI_SUCCESS(status) && buf.length > 0) {
-		acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-		sz = buf.length;
+		union acpi_object *obj;
+		/*
+		 * Adjust for the acpi_object header of the _FIT
+		 */
+		obj = buf.pointer;
+		if (obj->type == ACPI_TYPE_BUFFER) {
+			acpi_desc->nfit =
+				(struct acpi_nfit_header *)obj->buffer.pointer;
+			sz = obj->buffer.length;
+		} else
+			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+				__func__, (int) obj->type);
 	}
 
 	rc = acpi_nfit_init(acpi_desc, sz);
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
 	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct acpi_table_nfit *nfit_saved;
+	struct acpi_nfit_header *nfit_saved;
+	union acpi_object *obj;
 	struct device *dev = &adev->dev;
 	acpi_status status;
 	int ret;
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	}
 
 	nfit_saved = acpi_desc->nfit;
-	acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-	ret = acpi_nfit_init(acpi_desc, buf.length);
-	if (!ret) {
-		/* Merge failed, restore old nfit, and exit */
-		acpi_desc->nfit = nfit_saved;
-		dev_err(dev, "failed to merge updated NFIT\n");
+	obj = buf.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		acpi_desc->nfit =
+			(struct acpi_nfit_header *)obj->buffer.pointer;
+		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
+		if (ret) {
+			/* Merge failed, restore old nfit, and exit */
+			acpi_desc->nfit = nfit_saved;
+			dev_err(dev, "failed to merge updated NFIT\n");
+		}
+	} else {
+		/* Bad _FIT, restore old nfit */
+		dev_err(dev, "Invalid _FIT\n");
 	}
 	kfree(buf.pointer);
 
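Every NFIT sub-table comparison above is clamped to the length the table header declares, so a shorter entry from firmware can never push memcmp() past the end of the in-memory structure. A userspace sketch of the pattern (structures illustrative):

#include <stdio.h>
#include <string.h>

struct hdr { unsigned short type, length; };
struct spa { struct hdr header; unsigned int range_index; char opt[8]; };

/* compare only as many bytes as the entry claims to carry */
static int same_spa(const struct spa *a, const struct spa *b)
{
	size_t len = b->header.length < sizeof(*b) ? b->header.length
						   : sizeof(*b);
	return memcmp(a, b, len) == 0;
}

int main(void)
{
	struct spa a = { { 0, 8 }, 1, "x" }, b = { { 0, 8 }, 1, "y" };

	/* only the first 8 declared bytes are compared, so these match */
	printf("%d\n", same_spa(&a, &b));
	return 0;
}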
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 2ea5c0797c8f..3d549a383659 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -96,7 +96,8 @@ struct nfit_mem {
 
 struct acpi_nfit_desc {
 	struct nvdimm_bus_descriptor nd_desc;
-	struct acpi_table_nfit *nfit;
+	struct acpi_table_header acpi_header;
+	struct acpi_nfit_header *nfit;
 	struct mutex spa_map_mutex;
 	struct mutex init_mutex;
 	struct list_head spa_maps;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 850d7bf0c873..ae3fe4e64203 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
 		else
 			continue;
 
+		/*
+		 * Some legacy x86 host bridge drivers use iomem_resource and
+		 * ioport_resource as default resource pool, skip it.
+		 */
+		if (res == root)
+			continue;
+
 		conflict = insert_resource_conflict(root, res);
 		if (conflict) {
 			dev_info(&info->bridge->dev,
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bf034f8b7c1a..2fa8304171e0 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,7 +14,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -30,6 +29,7 @@ struct acpi_smb_hc {
 	u8 query_bit;
 	smbus_alarm_callback callback;
 	void *context;
+	bool done;
 };
 
 static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@ enum acpi_smb_offset {
 	ACPI_SMB_ALARM_DATA = 0x26,	/* 2 bytes alarm data */
 };
 
-static bool macbook;
-
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
 	return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
 	return ec_write(hc->offset + address, data);
 }
 
-static inline int smb_check_done(struct acpi_smb_hc *hc)
-{
-	union acpi_smb_status status = {.raw = 0};
-	smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
-	return status.fields.done && (status.fields.status == SMBUS_OK);
-}
-
 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
 {
-	if (wait_event_timeout(hc->wait, smb_check_done(hc),
-			       msecs_to_jiffies(timeout)))
+	if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
 		return 0;
-	/*
-	 * After the timeout happens, OS will try to check the status of SMbus.
-	 * If the status is what OS expected, it will be regarded as the bogus
-	 * timeout.
-	 */
-	if (smb_check_done(hc))
-		return 0;
-	else
-		return -ETIME;
+	return -ETIME;
 }
 
 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
 	}
 
 	mutex_lock(&hc->lock);
-	if (macbook)
-		udelay(5);
+	hc->done = false;
 	if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
 		goto end;
 	if (temp) {
@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
 	if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
 		return 0;
 	/* Check if it is only a completion notify */
-	if (status.fields.done)
+	if (status.fields.done && status.fields.status == SMBUS_OK) {
+		hc->done = true;
 		wake_up(&hc->wait);
+	}
 	if (!status.fields.alarm)
 		return 0;
 	mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 		acpi_handle handle, acpi_ec_query_func func,
 		void *data);
 
-static int macbook_dmi_match(const struct dmi_system_id *d)
-{
-	pr_debug("Detected MacBook, enabling workaround\n");
-	macbook = true;
-	return 0;
-}
-
-static struct dmi_system_id acpi_smbus_dmi_table[] = {
-	{ macbook_dmi_match, "Apple MacBook", {
-		DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-	},
-	{ },
-};
-
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
 	int status;
 	unsigned long long val;
 	struct acpi_smb_hc *hc;
 
-	dmi_check_system(acpi_smbus_dmi_table);
-
 	if (!device)
 		return -EINVAL;
 
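The SBS host controller now latches success in hc->done from the alarm handler and wakes the waiter, instead of re-reading hardware status after a timeout (which the removed MacBook DMI workaround papered over). A hedged userspace sketch of the hand-off; the wait/wake calls are stand-ins for the kernel wait-queue API:

#include <stdio.h>

struct hc { int done; /* plus a wait queue in the real driver */ };

static void alarm_handler(struct hc *hc, int status_ok)
{
	if (status_ok) {
		hc->done = 1;	/* record success before waking */
		/* wake_up(&hc->wait) in the driver */
	}
}

static int wait_done(const struct hc *hc)
{
	/* wait_event_timeout(hc->wait, hc->done, t) in the driver */
	return hc->done ? 0 : -1;	/* -ETIME upstream */
}

int main(void)
{
	struct hc hc = { 0 };

	alarm_handler(&hc, 1);
	printf("wait: %d\n", wait_done(&hc));	/* 0 */
	return 0;
}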
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e03b1ad25a90..167418e73445 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev)
 	}
 
 	pd = of_genpd_get_from_provider(&pd_args);
+	of_node_put(pd_args.np);
 	if (IS_ERR(pd)) {
 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
 			__func__, PTR_ERR(pd));
-		of_node_put(dev->of_node);
 		return -EPROBE_DEFER;
 	}
 
@@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev)
 	if (ret < 0) {
 		dev_err(dev, "failed to add to PM domain %s: %d",
 			pd->name, ret);
-		of_node_put(dev->of_node);
 		goto out;
 	}
 
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index e60dd12e23aa..1e937ac5f456 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		struct gpd_timing_data *td;
 		s64 constraint_ns;
 
-		if (!pdd->dev->driver)
-			continue;
-
 		/*
 		 * Check if the device is allowed to be off long enough for the
 		 * domain to turn off and on (that's how much time it will
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
 	struct wake_irq *wirq;
 	int err;
 
+	if (irq < 0)
+		return -EINVAL;
+
 	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
 	if (!wirq)
 		return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	struct wake_irq *wirq;
 	int err;
 
+	if (irq < 0)
+		return -EINVAL;
+
 	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
 	if (!wirq)
 		return -ENOMEM;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a28a562f7b7f..3457ac8c03e2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
 	sector_t capacity;
 	unsigned int index = 0;
 	struct kobject *kobj;
-	unsigned char thd_name[16];
 
 	if (dd->disk)
 		goto skip_create_disk; /* hw init done, before rebuild */
@@ -3958,10 +3957,9 @@ skip_create_disk:
 	}
 
 start_service_thread:
-	sprintf(thd_name, "mtip_svc_thd_%02d", index);
 	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
-						dd, dd->numa_node, "%s",
-						thd_name);
+						dd, dd->numa_node,
+						"mtip_svc_thd_%02d", index);
 
 	if (IS_ERR(dd->mtip_svc_handler)) {
 		dev_err(&dd->pdev->dev, "service thread failed to start\n");
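kthread_create_on_node() takes a printf-style format, so the driver no longer pre-formats the thread name into a small stack buffer. A userspace sketch of the same idea (the helper name here is invented for illustration):

#include <stdio.h>
#include <stdarg.h>

/* accept the format directly, like kthread_create_on_node() does */
static void create_named(const char *fmt, ...)
{
	char name[32];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);
	va_end(ap);
	printf("thread: %s\n", name);
}

int main(void)
{
	int index = 7;

	/* no intermediate sprintf() into a 16-byte buffer needed */
	create_named("mtip_svc_thd_%02d", index);
	return 0;
}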
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 6255d1c4bba4..0c3940ec5e62 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/hrtimer.h>
+#include <linux/lightnvm.h>
 
 struct nullb_cmd {
 	struct list_head list;
@@ -17,6 +18,7 @@ struct nullb_cmd {
 	struct bio *bio;
 	unsigned int tag;
 	struct nullb_queue *nq;
+	struct hrtimer timer;
 };
 
 struct nullb_queue {
@@ -39,23 +41,14 @@ struct nullb {
 
 	struct nullb_queue *queues;
 	unsigned int nr_queues;
+	char disk_name[DISK_NAME_LEN];
 };
 
 static LIST_HEAD(nullb_list);
 static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
-
-struct completion_queue {
-	struct llist_head list;
-	struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+static struct kmem_cache *ppa_cache;
 
 enum {
 	NULL_IRQ_NONE		= 0,
@@ -119,6 +112,10 @@ static int nr_devices = 2;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
+static bool use_lightnvm;
+module_param(use_lightnvm, bool, S_IRUGO);
+MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -135,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
 device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
 
 static int hw_queue_depth = 64;
@@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
 	put_tag(cmd->nq, cmd->tag);
 }
 
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 		cmd = &nq->cmds[tag];
 		cmd->tag = tag;
 		cmd->nq = nq;
+		if (irqmode == NULL_IRQ_TIMER) {
+			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+				     HRTIMER_MODE_REL);
+			cmd->timer.function = null_cmd_timer_expired;
+		}
 		return cmd;
 	}
 
@@ -213,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
+	struct request_queue *q = NULL;
+
 	switch (queue_mode) {
 	case NULL_Q_MQ:
 		blk_mq_end_request(cmd->rq, 0);
@@ -223,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd)
 		break;
 	case NULL_Q_BIO:
 		bio_endio(cmd->bio);
-		break;
+		goto free_cmd;
 	}
 
+	if (cmd->rq)
+		q = cmd->rq->q;
+
+	/* Restart queue if needed, as we are freeing a tag */
+	if (q && !q->mq_ops && blk_queue_stopped(q)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		if (blk_queue_stopped(q))
+			blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+free_cmd:
 	free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 {
-	struct completion_queue *cq;
-	struct llist_node *entry;
-	struct nullb_cmd *cmd;
-
-	cq = &per_cpu(completion_queues, smp_processor_id());
-
-	while ((entry = llist_del_all(&cq->list)) != NULL) {
-		entry = llist_reverse_order(entry);
-		do {
-			struct request_queue *q = NULL;
-
-			cmd = container_of(entry, struct nullb_cmd, ll_list);
-			entry = entry->next;
-			if (cmd->rq)
-				q = cmd->rq->q;
-			end_cmd(cmd);
-
-			if (q && !q->mq_ops && blk_queue_stopped(q)) {
-				spin_lock(q->queue_lock);
-				if (blk_queue_stopped(q))
-					blk_start_queue(q);
-				spin_unlock(q->queue_lock);
-			}
-		} while (entry);
-	}
+	end_cmd(container_of(timer, struct nullb_cmd, timer));
 
 	return HRTIMER_NORESTART;
 }
 
 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
-
-	cmd->ll_list.next = NULL;
-	if (llist_add(&cmd->ll_list, &cq->list)) {
-		ktime_t kt = ktime_set(0, completion_nsec);
-
-		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
-	}
-
-	put_cpu();
+	ktime_t kt = ktime_set(0, completion_nsec);
+
+	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }
 
 static void null_softirq_done_fn(struct request *rq)
@@ -369,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+	if (irqmode == NULL_IRQ_TIMER) {
+		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		cmd->timer.function = null_cmd_timer_expired;
+	}
 	cmd->rq = bd->rq;
 	cmd->nq = hctx->driver_data;
 
@@ -427,15 +419,156 @@ static void null_del_dev(struct nullb *nullb)
 {
 	list_del_init(&nullb->list);
 
-	del_gendisk(nullb->disk);
+	if (use_lightnvm)
+		nvm_unregister(nullb->disk_name);
+	else
+		del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
 	if (queue_mode == NULL_Q_MQ)
 		blk_mq_free_tag_set(&nullb->tag_set);
-	put_disk(nullb->disk);
+	if (!use_lightnvm)
+		put_disk(nullb->disk);
 	cleanup_queues(nullb);
 	kfree(nullb);
 }
 
+#ifdef CONFIG_NVM
+
+static void null_lnvm_end_io(struct request *rq, int error)
+{
+	struct nvm_rq *rqd = rq->end_io_data;
+	struct nvm_dev *dev = rqd->dev;
+
+	dev->mt->end_io(rqd, error);
+
+	blk_put_request(rq);
+}
+
+static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+{
+	struct request *rq;
+	struct bio *bio = rqd->bio;
+
+	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+	if (IS_ERR(rq))
+		return -ENOMEM;
+
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
+	rq->__sector = bio->bi_iter.bi_sector;
+	rq->ioprio = bio_prio(bio);
+
+	if (bio_has_data(bio))
+		rq->nr_phys_segments = bio_phys_segments(q, bio);
+
+	rq->__data_len = bio->bi_iter.bi_size;
+	rq->bio = rq->biotail = bio;
+
+	rq->end_io_data = rqd;
+
+	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
+
+	return 0;
+}
+
+static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+{
+	sector_t size = gb * 1024 * 1024 * 1024ULL;
+	sector_t blksize;
+	struct nvm_id_group *grp;
+
+	id->ver_id = 0x1;
+	id->vmnt = 0;
+	id->cgrps = 1;
+	id->cap = 0x3;
+	id->dom = 0x1;
+
+	id->ppaf.blk_offset = 0;
+	id->ppaf.blk_len = 16;
+	id->ppaf.pg_offset = 16;
+	id->ppaf.pg_len = 16;
+	id->ppaf.sect_offset = 32;
+	id->ppaf.sect_len = 8;
+	id->ppaf.pln_offset = 40;
+	id->ppaf.pln_len = 8;
+	id->ppaf.lun_offset = 48;
+	id->ppaf.lun_len = 8;
+	id->ppaf.ch_offset = 56;
+	id->ppaf.ch_len = 8;
+
+	do_div(size, bs); /* convert size to pages */
+	do_div(size, 256); /* convert size to pgs pr blk */
+	grp = &id->groups[0];
+	grp->mtype = 0;
+	grp->fmtype = 0;
+	grp->num_ch = 1;
+	grp->num_pg = 256;
+	blksize = size;
+	do_div(size, (1 << 16));
+	grp->num_lun = size + 1;
+	do_div(blksize, grp->num_lun);
+	grp->num_blk = blksize;
+	grp->num_pln = 1;
+
+	grp->fpg_sz = bs;
+	grp->csecs = bs;
+	grp->trdt = 25000;
+	grp->trdm = 25000;
+	grp->tprt = 500000;
+	grp->tprm = 500000;
+	grp->tbet = 1500000;
+	grp->tbem = 1500000;
+	grp->mpos = 0x010101; /* single plane rwe */
+	grp->cpar = hw_queue_depth;
+
+	return 0;
+}
+
+static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+{
+	mempool_t *virtmem_pool;
+
+	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
+	if (!virtmem_pool) {
+		pr_err("null_blk: Unable to create virtual memory pool\n");
+		return NULL;
+	}
+
+	return virtmem_pool;
+}
+
+static void null_lnvm_destroy_dma_pool(void *pool)
+{
+	mempool_destroy(pool);
+}
+
+static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+				gfp_t mem_flags, dma_addr_t *dma_handler)
+{
+	return mempool_alloc(pool, mem_flags);
+}
+
+static void null_lnvm_dev_dma_free(void *pool, void *entry,
+							dma_addr_t dma_handler)
+{
+	mempool_free(entry, pool);
+}
+
+static struct nvm_dev_ops null_lnvm_dev_ops = {
+	.identity		= null_lnvm_id,
+	.submit_io		= null_lnvm_submit_io,
+
+	.create_dma_pool	= null_lnvm_create_dma_pool,
+	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
+	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
+	.dev_dma_free		= null_lnvm_dev_dma_free,
+
+	/* Simulate nvme protocol restriction */
+	.max_phys_sect		= 64,
+};
+#else
+static struct nvm_dev_ops null_lnvm_dev_ops;
+#endif /* CONFIG_NVM */
+
 static int null_open(struct block_device *bdev, fmode_t mode)
 {
 	return 0;
@@ -575,11 +708,6 @@ static int null_add_dev(void)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
-	disk = nullb->disk = alloc_disk_node(1, home_node);
-	if (!disk) {
-		rv = -ENOMEM;
-		goto out_cleanup_blk_queue;
-	}
 
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
@@ -589,6 +717,21 @@ static int null_add_dev(void)
 	blk_queue_logical_block_size(nullb->q, bs);
 	blk_queue_physical_block_size(nullb->q, bs);
 
+	sprintf(nullb->disk_name, "nullb%d", nullb->index);
+
+	if (use_lightnvm) {
+		rv = nvm_register(nullb->q, nullb->disk_name,
+				  &null_lnvm_dev_ops);
+		if (rv)
+			goto out_cleanup_blk_queue;
+		goto done;
+	}
+
+	disk = nullb->disk = alloc_disk_node(1, home_node);
+	if (!disk) {
+		rv = -ENOMEM;
+		goto out_cleanup_lightnvm;
+	}
 	size = gb * 1024 * 1024 * 1024ULL;
 	set_capacity(disk, size >> 9);
 
@@ -598,10 +741,15 @@ static int null_add_dev(void)
 	disk->fops		= &null_fops;
 	disk->private_data	= nullb;
 	disk->queue		= nullb->q;
-	sprintf(disk->disk_name, "nullb%d", nullb->index);
+	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
 	add_disk(disk);
+done:
 	return 0;
 
+out_cleanup_lightnvm:
+	if (use_lightnvm)
+		nvm_unregister(nullb->disk_name);
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
@@ -625,6 +773,18 @@ static int __init null_init(void)
 		bs = PAGE_SIZE;
 	}
 
+	if (use_lightnvm && bs != 4096) {
+		pr_warn("null_blk: LightNVM only supports 4k block size\n");
+		pr_warn("null_blk: defaults block size to 4k\n");
+		bs = 4096;
+	}
+
+	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
+		pr_warn("null_blk: defaults queue mode to blk-mq\n");
+		queue_mode = NULL_Q_MQ;
+	}
+
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
@@ -638,32 +798,31 @@ static int __init null_init(void)
 
 	mutex_init(&lock);
 
-	/* Initialize a separate list for each CPU for issuing softirqs */
-	for_each_possible_cpu(i) {
-		struct completion_queue *cq = &per_cpu(completion_queues, i);
-
-		init_llist_head(&cq->list);
-
-		if (irqmode != NULL_IRQ_TIMER)
-			continue;
-
-		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		cq->timer.function = null_cmd_timer_expired;
-	}
-
 	null_major = register_blkdev(0, "nullb");
 	if (null_major < 0)
 		return null_major;
 
+	if (use_lightnvm) {
+		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
+								0, 0, NULL);
+		if (!ppa_cache) {
+			pr_err("null_blk: unable to create ppa cache\n");
+			return -ENOMEM;
+		}
+	}
+
 	for (i = 0; i < nr_devices; i++) {
 		if (null_add_dev()) {
 			unregister_blkdev(null_major, "nullb");
-			return -EINVAL;
+			goto err_ppa;
 		}
 	}
 
 	pr_info("null: module loaded\n");
 	return 0;
+err_ppa:
+	kmem_cache_destroy(ppa_cache);
+	return -EINVAL;
 }
 
 static void __exit null_exit(void)
@@ -678,6 +837,8 @@ static void __exit null_exit(void)
 		null_del_dev(nullb);
 	}
 	mutex_unlock(&lock);
+
+	kmem_cache_destroy(ppa_cache);
 }
 
 module_init(null_init);
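The null_blk rework gives each command its own hrtimer and recovers the command in the expiry handler with container_of(), eliminating the shared per-CPU completion lists. A userspace sketch of that recovery step (simplified types, not the kernel hrtimer API):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { long expires; };
struct cmd   { int tag; struct timer timer; };

static void timer_fired(struct timer *t)
{
	/* recover the owning command; no per-CPU list to drain */
	struct cmd *cmd = container_of(t, struct cmd, timer);

	printf("completed tag %d\n", cmd->tag);
}

int main(void)
{
	struct cmd c = { .tag = 3 };

	timer_fired(&c.timer);		/* simulates hrtimer expiry */
	return 0;
}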
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 235708c7c46e..81ea69fee7ca 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 		goto err_rq;
 	}
 	img_request->rq = rq;
+	snapc = NULL; /* img_request consumes a ref */
 
 	if (op_type == OBJ_OP_DISCARD)
 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
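The rbd fix is an ownership hand-off: once img_request consumes the snapshot-context reference, the local pointer is cleared so the shared error path cannot drop it a second time. A userspace sketch of the pattern with an illustrative refcount:

#include <stdio.h>
#include <stdlib.h>

struct snap { int refs; };

static void snap_put(struct snap *s)
{
	if (s && --s->refs == 0)
		printf("freed\n");
}

int main(void)
{
	struct snap *snapc = malloc(sizeof(*snapc));
	struct snap *owner;

	snapc->refs = 1;
	owner = snapc;		/* the request consumes the reference */
	snapc = NULL;		/* the fix: give up the local claim */

	snap_put(snapc);	/* error path: harmless no-op now */
	snap_put(owner);	/* sole owner drops the last ref */
	return 0;
}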
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 9f1856948758..bf500e0e7362 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = {
 
 module_platform_driver(omap_ocp2scp_driver);
 
-MODULE_ALIAS("platform: omap-ocp2scp");
+MODULE_ALIAS("platform:omap-ocp2scp");
 MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_DESCRIPTION("OMAP OCP2SCP driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f36a071..55fe9020459f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
412 return rv; 412 return rv;
413} 413}
414 414
415static void start_check_enables(struct smi_info *smi_info) 415static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
416{
417 smi_info->last_timeout_jiffies = jiffies;
418 mod_timer(&smi_info->si_timer, new_val);
419 smi_info->timer_running = true;
420}
421
422/*
423 * Start a new message and (re)start the timer and thread.
424 */
425static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
426 unsigned int size)
427{
428 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
429
430 if (smi_info->thread)
431 wake_up_process(smi_info->thread);
432
433 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
434}
435
436static void start_check_enables(struct smi_info *smi_info, bool start_timer)
416{ 437{
417 unsigned char msg[2]; 438 unsigned char msg[2];
418 439
419 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 440 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
420 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 441 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
421 442
422 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 443 if (start_timer)
444 start_new_msg(smi_info, msg, 2);
445 else
446 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
423 smi_info->si_state = SI_CHECKING_ENABLES; 447 smi_info->si_state = SI_CHECKING_ENABLES;
424} 448}
425 449
426static void start_clear_flags(struct smi_info *smi_info) 450static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
427{ 451{
428 unsigned char msg[3]; 452 unsigned char msg[3];
429 453
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
432 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 456 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
433 msg[2] = WDT_PRE_TIMEOUT_INT; 457 msg[2] = WDT_PRE_TIMEOUT_INT;
434 458
435 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 459 if (start_timer)
460 start_new_msg(smi_info, msg, 3);
461 else
462 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
436 smi_info->si_state = SI_CLEARING_FLAGS; 463 smi_info->si_state = SI_CLEARING_FLAGS;
437} 464}
438 465
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
442 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 469 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
443 smi_info->curr_msg->data_size = 2; 470 smi_info->curr_msg->data_size = 2;
444 471
445 smi_info->handlers->start_transaction( 472 start_new_msg(smi_info, smi_info->curr_msg->data,
446 smi_info->si_sm, 473 smi_info->curr_msg->data_size);
447 smi_info->curr_msg->data,
448 smi_info->curr_msg->data_size);
449 smi_info->si_state = SI_GETTING_MESSAGES; 474 smi_info->si_state = SI_GETTING_MESSAGES;
450} 475}
451 476
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
455 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 480 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
456 smi_info->curr_msg->data_size = 2; 481 smi_info->curr_msg->data_size = 2;
457 482
458 smi_info->handlers->start_transaction( 483 start_new_msg(smi_info, smi_info->curr_msg->data,
459 smi_info->si_sm, 484 smi_info->curr_msg->data_size);
460 smi_info->curr_msg->data,
461 smi_info->curr_msg->data_size);
462 smi_info->si_state = SI_GETTING_EVENTS; 485 smi_info->si_state = SI_GETTING_EVENTS;
463} 486}
464 487
465static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
466{
467 smi_info->last_timeout_jiffies = jiffies;
468 mod_timer(&smi_info->si_timer, new_val);
469 smi_info->timer_running = true;
470}
471
472/* 488/*
473 * When we have a situation where we run out of memory and cannot 489 * When we have a situation where we run out of memory and cannot
474 * allocate messages, we just leave them in the BMC and run the system 490 * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
478 * Note that we cannot just use disable_irq(), since the interrupt may 494 * Note that we cannot just use disable_irq(), since the interrupt may
479 * be shared. 495 * be shared.
480 */ 496 */
481static inline bool disable_si_irq(struct smi_info *smi_info) 497static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
482{ 498{
483 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 499 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
484 smi_info->interrupt_disabled = true; 500 smi_info->interrupt_disabled = true;
485 start_check_enables(smi_info); 501 start_check_enables(smi_info, start_timer);
486 return true; 502 return true;
487 } 503 }
488 return false; 504 return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
492{ 508{
493 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 509 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
494 smi_info->interrupt_disabled = false; 510 smi_info->interrupt_disabled = false;
495 start_check_enables(smi_info); 511 start_check_enables(smi_info, true);
496 return true; 512 return true;
497 } 513 }
498 return false; 514 return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
510 526
511 msg = ipmi_alloc_smi_msg(); 527 msg = ipmi_alloc_smi_msg();
512 if (!msg) { 528 if (!msg) {
513 if (!disable_si_irq(smi_info)) 529 if (!disable_si_irq(smi_info, true))
514 smi_info->si_state = SI_NORMAL; 530 smi_info->si_state = SI_NORMAL;
515 } else if (enable_si_irq(smi_info)) { 531 } else if (enable_si_irq(smi_info)) {
516 ipmi_free_smi_msg(msg); 532 ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
526 /* Watchdog pre-timeout */ 542 /* Watchdog pre-timeout */
527 smi_inc_stat(smi_info, watchdog_pretimeouts); 543 smi_inc_stat(smi_info, watchdog_pretimeouts);
528 544
529 start_clear_flags(smi_info); 545 start_clear_flags(smi_info, true);
530 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 546 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
531 if (smi_info->intf) 547 if (smi_info->intf)
532 ipmi_smi_watchdog_pretimeout(smi_info->intf); 548 ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
879 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 895 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
880 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 896 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
881 897
882 smi_info->handlers->start_transaction( 898 start_new_msg(smi_info, msg, 2);
883 smi_info->si_sm, msg, 2);
884 smi_info->si_state = SI_GETTING_FLAGS; 899 smi_info->si_state = SI_GETTING_FLAGS;
885 goto restart; 900 goto restart;
886 } 901 }
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
910 * disable and messages disabled. 925 * disable and messages disabled.
911 */ 926 */
912 if (smi_info->supports_event_msg_buff || smi_info->irq) { 927 if (smi_info->supports_event_msg_buff || smi_info->irq) {
913 start_check_enables(smi_info); 928 start_check_enables(smi_info, true);
914 } else { 929 } else {
915 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 930 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
916 if (!smi_info->curr_msg) 931 if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
920 } 935 }
921 goto restart; 936 goto restart;
922 } 937 }
938
939 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
939 /* Ok, if it fails, the timer will just go off. */
941 if (del_timer(&smi_info->si_timer))
942 smi_info->timer_running = false;
943 }
944
923 out: 945 out:
924 return si_sm_result; 946 return si_sm_result;
925} 947}
@@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
2560 .data = (void *)(unsigned long) SI_BT }, 2582 .data = (void *)(unsigned long) SI_BT },
2561 {}, 2583 {},
2562}; 2584};
2585MODULE_DEVICE_TABLE(of, of_ipmi_match);
2563 2586
2564static int of_ipmi_probe(struct platform_device *dev) 2587static int of_ipmi_probe(struct platform_device *dev)
2565{ 2588{
@@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
2646 } 2669 }
2647 return 0; 2670 return 0;
2648} 2671}
2649MODULE_DEVICE_TABLE(of, of_ipmi_match);
2650#else 2672#else
2651#define of_ipmi_match NULL 2673#define of_ipmi_match NULL
2652static int of_ipmi_probe(struct platform_device *dev) 2674static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
3613 * Start clearing the flags before we enable interrupts or the 3635 * Start clearing the flags before we enable interrupts or the
3614 * timer to avoid racing with the timer. 3636 * timer to avoid racing with the timer.
3615 */ 3637 */
3616 start_clear_flags(new_smi); 3638 start_clear_flags(new_smi, false);
3617 3639
3618 /* 3640 /*
3619 * IRQ is defined to be set when non-zero. req_events will 3641 * IRQ is defined to be set when non-zero. req_events will
@@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
3908 poll(to_clean); 3930 poll(to_clean);
3909 schedule_timeout_uninterruptible(1); 3931 schedule_timeout_uninterruptible(1);
3910 } 3932 }
3911 disable_si_irq(to_clean); 3933 disable_si_irq(to_clean, false);
3912 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3934 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3913 poll(to_clean); 3935 poll(to_clean);
3914 schedule_timeout_uninterruptible(1); 3936 schedule_timeout_uninterruptible(1);
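The ipmi_si rework above moves smi_mod_timer() earlier and funnels every transaction start through start_new_msg(), adding a start_timer flag for callers that run before the timer is set up (probe) or after it is gone (teardown). A compressed sketch of that flag-plumbing with illustrative names:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    #define EXAMPLE_TIMEOUT_JIFFIES (HZ / 10)

    struct example_sm {                     /* stand-in for smi_info */
            struct timer_list timer;
            unsigned long last_timeout_jiffies;
            bool timer_running;
    };

    /* Probe and teardown call this with arm_timer == false because the
     * timer is not (or no longer) usable there; normal operation passes
     * true so a stalled transaction is always noticed. */
    static void example_start(struct example_sm *sm, bool arm_timer)
    {
            if (arm_timer) {
                    sm->last_timeout_jiffies = jiffies;
                    mod_timer(&sm->timer, jiffies + EXAMPLE_TIMEOUT_JIFFIES);
                    sm->timer_running = true;
            }
            /* ...start the transaction on the state machine here... */
    }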
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0ac3bd1a5497..096f0cef4da1 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -153,6 +153,9 @@ static int timeout = 10;
153/* The pre-timeout is disabled by default. */ 153/* The pre-timeout is disabled by default. */
154static int pretimeout; 154static int pretimeout;
155 155
156/* Default timeout to set on panic */
157static int panic_wdt_timeout = 255;
158
156/* Default action is to reset the board on a timeout. */ 159/* Default action is to reset the board on a timeout. */
157static unsigned char action_val = WDOG_TIMEOUT_RESET; 160static unsigned char action_val = WDOG_TIMEOUT_RESET;
158 161
@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
293module_param(pretimeout, timeout, 0644); 296module_param(pretimeout, timeout, 0644);
294MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); 297MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
295 298
299module_param(panic_wdt_timeout, timeout, 0644);
300 MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
301
296module_param_cb(action, &param_ops_str, action_op, 0644); 302module_param_cb(action, &param_ops_str, action_op, 0644);
297MODULE_PARM_DESC(action, "Timeout action. One of: " 303MODULE_PARM_DESC(action, "Timeout action. One of: "
298 "reset, none, power_cycle, power_off."); 304 "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
1189 /* Make sure we do this only once. */ 1195 /* Make sure we do this only once. */
1190 panic_event_handled = 1; 1196 panic_event_handled = 1;
1191 1197
1192 timeout = 255; 1198 timeout = panic_wdt_timeout;
1193 pretimeout = 0; 1199 pretimeout = 0;
1194 panic_halt_ipmi_set_timeout(); 1200 panic_halt_ipmi_set_timeout();
1195 } 1201 }
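For reference, the new panic_wdt_timeout knob above is declared with the driver's own "timeout" parameter type; expressed with a plain int the equivalent would look roughly like the sketch below (hedged: the driver's custom type adds its own parsing and validation).

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int panic_wdt_timeout = 255;     /* seconds */
    module_param(panic_wdt_timeout, int, 0644);
    MODULE_PARM_DESC(panic_wdt_timeout,
                     "Watchdog timeout in seconds to program on kernel panic");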
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 71cfdf7c9708..2eb5f0efae90 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,4 +1,5 @@
1menu "Clock Source drivers" 1menu "Clock Source drivers"
2 depends on !ARCH_USES_GETTIMEOFFSET
2 3
3config CLKSRC_OF 4config CLKSRC_OF
4 bool 5 bool
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 10202f1fdfd7..517e1c7624d4 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
203 int err; 203 int err;
204 204
205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); 205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
206 ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); 206 ftm_writel(~0u, priv->clkevt_base + FTM_MOD);
207 207
208 ftm_reset_counter(priv->clkevt_base); 208 ftm_reset_counter(priv->clkevt_base);
209 209
@@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
230 int err; 230 int err;
231 231
232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); 232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
233 ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); 233 ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
234 234
235 ftm_reset_counter(priv->clksrc_base); 235 ftm_reset_counter(priv->clksrc_base);
236 236
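The fsl_ftm_timer change matters on 64-bit builds: there, ~0UL is a 64-bit all-ones value, which a write to the 32-bit MOD register silently truncates (and which can trip overflow diagnostics), while ~0u is exactly the 32-bit pattern intended. A user-space demonstration on an LP64 machine:

    #include <stdio.h>

    int main(void)
    {
            printf("~0u  = 0x%x (%zu bytes)\n", ~0u, sizeof(~0u));
            printf("~0UL = 0x%lx (%zu bytes)\n", ~0UL, sizeof(~0UL));
            /* LP64 output: ~0u is 0xffffffff in 4 bytes, the value a
             * 32-bit MOD register expects; ~0UL is 0xffffffffffffffff
             * in 8 bytes and gets truncated by a 32-bit MMIO write. */
            return 0;
    }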
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1582c1c016b0..235a1ba73d92 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
84config ARM_MT8173_CPUFREQ 84config ARM_MT8173_CPUFREQ
85 bool "Mediatek MT8173 CPUFreq support" 85 bool "Mediatek MT8173 CPUFreq support"
86 depends on ARCH_MEDIATEK && REGULATOR 86 depends on ARCH_MEDIATEK && REGULATOR
87 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
87 depends on !CPU_THERMAL || THERMAL=y 88 depends on !CPU_THERMAL || THERMAL=y
88 select PM_OPP 89 select PM_OPP
89 help 90 help
@@ -201,7 +202,7 @@ config ARM_SA1110_CPUFREQ
201 202
202config ARM_SCPI_CPUFREQ 203config ARM_SCPI_CPUFREQ
203 tristate "SCPI based CPUfreq driver" 204 tristate "SCPI based CPUfreq driver"
204 depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL 205 depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
205 help 206 help
206 This adds the CPUfreq driver support for ARM big.LITTLE platforms 207 This adds the CPUfreq driver support for ARM big.LITTLE platforms
207 using SCPI protocol for CPU power management. 208 using SCPI protocol for CPU power management.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,7 +5,6 @@
5config X86_INTEL_PSTATE 5config X86_INTEL_PSTATE
6 bool "Intel P state control" 6 bool "Intel P state control"
7 depends on X86 7 depends on X86
8 select ACPI_PROCESSOR if ACPI
9 help 8 help
10 This driver provides a P state for Intel core processors. 9 This driver provides a P state for Intel core processors.
11 The driver implements an internal governor and will become 10 The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e8cb334094b0..7c0bdfb1a2ca 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
98 policy->max = cpu->perf_caps.highest_perf; 98 policy->max = cpu->perf_caps.highest_perf;
99 policy->cpuinfo.min_freq = policy->min; 99 policy->cpuinfo.min_freq = policy->min;
100 policy->cpuinfo.max_freq = policy->max; 100 policy->cpuinfo.max_freq = policy->max;
101 policy->shared_type = cpu->shared_type;
101 102
102 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 103 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
103 cpumask_copy(policy->cpus, cpu->shared_cpu_map); 104 cpumask_copy(policy->cpus, cpu->shared_cpu_map);
104 else { 105 else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
105 /* Support only SW_ANY for now. */ 106 /* Support only SW_ANY for now. */
106 pr_debug("Unsupported CPU co-ord type\n"); 107 pr_debug("Unsupported CPU co-ord type\n");
107 return -EFAULT; 108 return -EFAULT;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7c48e7316d91..8412ce5f93a7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
976 976
977 new_policy.governor = gov; 977 new_policy.governor = gov;
978 978
979 /* Use the default policy if its valid. */ 979 /* Use the default policy if there is no last_policy. */
980 if (cpufreq_driver->setpolicy) 980 if (cpufreq_driver->setpolicy) {
981 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 981 if (policy->last_policy)
982 982 new_policy.policy = policy->last_policy;
983 else
984 cpufreq_parse_governor(gov->name, &new_policy.policy,
985 NULL);
986 }
983 /* set default policy */ 987 /* set default policy */
984 return cpufreq_set_policy(policy, &new_policy); 988 return cpufreq_set_policy(policy, &new_policy);
985} 989}
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu)
1330 if (has_target()) 1334 if (has_target())
1331 strncpy(policy->last_governor, policy->governor->name, 1335 strncpy(policy->last_governor, policy->governor->name,
1332 CPUFREQ_NAME_LEN); 1336 CPUFREQ_NAME_LEN);
1337 else
1338 policy->last_policy = policy->policy;
1333 } else if (cpu == policy->cpu) { 1339 } else if (cpu == policy->cpu) {
1334 /* Nominate new CPU */ 1340 /* Nominate new CPU */
1335 policy->cpu = cpumask_any(policy->cpus); 1341 policy->cpu = cpumask_any(policy->cpus);
@@ -1401,13 +1407,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1401 } 1407 }
1402 1408
1403 cpumask_clear_cpu(cpu, policy->real_cpus); 1409 cpumask_clear_cpu(cpu, policy->real_cpus);
1410 remove_cpu_dev_symlink(policy, cpu);
1404 1411
1405 if (cpumask_empty(policy->real_cpus)) { 1412 if (cpumask_empty(policy->real_cpus))
1406 cpufreq_policy_free(policy, true); 1413 cpufreq_policy_free(policy, true);
1407 return;
1408 }
1409
1410 remove_cpu_dev_symlink(policy, cpu);
1411} 1414}
1412 1415
1413static void handle_update(struct work_struct *work) 1416static void handle_update(struct work_struct *work)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d097def6..4d07cbd2b23c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/cpufeature.h> 35#include <asm/cpufeature.h>
36 36
37#if IS_ENABLED(CONFIG_ACPI) 37#define ATOM_RATIOS 0x66a
38#include <acpi/processor.h> 38#define ATOM_VIDS 0x66b
39#endif 39#define ATOM_TURBO_RATIOS 0x66c
40 40#define ATOM_TURBO_VIDS 0x66d
41#define BYT_RATIOS 0x66a
42#define BYT_VIDS 0x66b
43#define BYT_TURBO_RATIOS 0x66c
44#define BYT_TURBO_VIDS 0x66d
45 41
46#define FRAC_BITS 8 42#define FRAC_BITS 8
47#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 43#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
117 u64 prev_mperf; 113 u64 prev_mperf;
118 u64 prev_tsc; 114 u64 prev_tsc;
119 struct sample sample; 115 struct sample sample;
120#if IS_ENABLED(CONFIG_ACPI)
121 struct acpi_processor_performance acpi_perf_data;
122#endif
123}; 116};
124 117
125static struct cpudata **all_cpu_data; 118static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
150static struct pstate_adjust_policy pid_params; 143static struct pstate_adjust_policy pid_params;
151static struct pstate_funcs pstate_funcs; 144static struct pstate_funcs pstate_funcs;
152static int hwp_active; 145static int hwp_active;
153static int no_acpi_perf;
154 146
155struct perf_limits { 147struct perf_limits {
156 int no_turbo; 148 int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
163 int max_sysfs_pct; 155 int max_sysfs_pct;
164 int min_policy_pct; 156 int min_policy_pct;
165 int min_sysfs_pct; 157 int min_sysfs_pct;
166 int max_perf_ctl;
167 int min_perf_ctl;
168}; 158};
169 159
170static struct perf_limits performance_limits = { 160static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
191 .max_sysfs_pct = 100, 181 .max_sysfs_pct = 100,
192 .min_policy_pct = 0, 182 .min_policy_pct = 0,
193 .min_sysfs_pct = 0, 183 .min_sysfs_pct = 0,
194 .max_perf_ctl = 0,
195 .min_perf_ctl = 0,
196}; 184};
197 185
198#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE 186#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
201static struct perf_limits *limits = &powersave_limits; 189static struct perf_limits *limits = &powersave_limits;
202#endif 190#endif
203 191
204#if IS_ENABLED(CONFIG_ACPI)
205/*
206 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
207 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
208 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
209 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
210 * target ratio 0x17. The _PSS control value stores in a format which can be
211 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
212 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
213 * This function converts the _PSS control value to intel pstate driver format
214 * for comparison and assignment.
215 */
216static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
217{
218 return cpu->acpi_perf_data.states[index].control >> 8;
219}
220
221static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
222{
223 struct cpudata *cpu;
224 int ret;
225 bool turbo_absent = false;
226 int max_pstate_index;
227 int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
228 int i;
229
230 cpu = all_cpu_data[policy->cpu];
231
232 pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
233 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
234 cpu->pstate.turbo_pstate);
235
236 if (!cpu->acpi_perf_data.shared_cpu_map &&
237 zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
238 GFP_KERNEL, cpu_to_node(policy->cpu))) {
239 return -ENOMEM;
240 }
241
242 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
243 policy->cpu);
244 if (ret)
245 return ret;
246
247 /*
248 * Check if the control value in _PSS is for PERF_CTL MSR, which should
249 * guarantee that the states returned by it map to the states in our
250 * list directly.
251 */
252 if (cpu->acpi_perf_data.control_register.space_id !=
253 ACPI_ADR_SPACE_FIXED_HARDWARE)
254 return -EIO;
255
256 pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
257 for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
258 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
259 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
260 (u32) cpu->acpi_perf_data.states[i].core_frequency,
261 (u32) cpu->acpi_perf_data.states[i].power,
262 (u32) cpu->acpi_perf_data.states[i].control);
263
264 /*
265 * If there is only one entry _PSS, simply ignore _PSS and continue as
266 * usual without taking _PSS into account
267 */
268 if (cpu->acpi_perf_data.state_count < 2)
269 return 0;
270
271 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
272 min_pss_ctl = convert_to_native_pstate_format(cpu,
273 cpu->acpi_perf_data.state_count - 1);
274 /* Check if there is a turbo freq in _PSS */
275 if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
276 turbo_pss_ctl > cpu->pstate.min_pstate) {
277 pr_debug("intel_pstate: no turbo range exists in _PSS\n");
278 limits->no_turbo = limits->turbo_disabled = 1;
279 cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
280 turbo_absent = true;
281 }
282
283 /* Check if the max non turbo p state < Intel P state max */
284 max_pstate_index = turbo_absent ? 0 : 1;
285 max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
286 if (max_pss_ctl < cpu->pstate.max_pstate &&
287 max_pss_ctl > cpu->pstate.min_pstate)
288 cpu->pstate.max_pstate = max_pss_ctl;
289
290 /* check If min perf > Intel P State min */
291 if (min_pss_ctl > cpu->pstate.min_pstate &&
292 min_pss_ctl < cpu->pstate.max_pstate) {
293 cpu->pstate.min_pstate = min_pss_ctl;
294 policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
295 }
296
297 if (turbo_absent)
298 policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
299 cpu->pstate.scaling;
300 else {
301 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
302 cpu->pstate.scaling;
303 /*
304 * The _PSS table doesn't contain whole turbo frequency range.
305 * This just contains +1 MHZ above the max non turbo frequency,
306 * with control value corresponding to max turbo ratio. But
307 * when cpufreq set policy is called, it will call with this
308 * max frequency, which will cause a reduced performance as
309 * this driver uses real max turbo frequency as the max
310 * frequeny. So correct this frequency in _PSS table to
311 * correct max turbo frequency based on the turbo ratio.
312 * Also need to convert to MHz as _PSS freq is in MHz.
313 */
314 cpu->acpi_perf_data.states[0].core_frequency =
315 turbo_pss_ctl * 100;
316 }
317
318 pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
319 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
320 cpu->pstate.turbo_pstate);
321 pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
322 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
323
324 return 0;
325}
326
327static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
328{
329 struct cpudata *cpu;
330
331 if (!no_acpi_perf)
332 return 0;
333
334 cpu = all_cpu_data[policy->cpu];
335 acpi_processor_unregister_performance(policy->cpu);
336 return 0;
337}
338
339#else
340static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
341{
342 return 0;
343}
344
345static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
346{
347 return 0;
348}
349#endif
350
351static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 192static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
352 int deadband, int integral) { 193 int deadband, int integral) {
353 pid->setpoint = setpoint; 194 pid->setpoint = setpoint;
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
687 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 528 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
688} 529}
689 530
690static int byt_get_min_pstate(void) 531static int atom_get_min_pstate(void)
691{ 532{
692 u64 value; 533 u64 value;
693 534
694 rdmsrl(BYT_RATIOS, value); 535 rdmsrl(ATOM_RATIOS, value);
695 return (value >> 8) & 0x7F; 536 return (value >> 8) & 0x7F;
696} 537}
697 538
698static int byt_get_max_pstate(void) 539static int atom_get_max_pstate(void)
699{ 540{
700 u64 value; 541 u64 value;
701 542
702 rdmsrl(BYT_RATIOS, value); 543 rdmsrl(ATOM_RATIOS, value);
703 return (value >> 16) & 0x7F; 544 return (value >> 16) & 0x7F;
704} 545}
705 546
706static int byt_get_turbo_pstate(void) 547static int atom_get_turbo_pstate(void)
707{ 548{
708 u64 value; 549 u64 value;
709 550
710 rdmsrl(BYT_TURBO_RATIOS, value); 551 rdmsrl(ATOM_TURBO_RATIOS, value);
711 return value & 0x7F; 552 return value & 0x7F;
712} 553}
713 554
714static void byt_set_pstate(struct cpudata *cpudata, int pstate) 555static void atom_set_pstate(struct cpudata *cpudata, int pstate)
715{ 556{
716 u64 val; 557 u64 val;
717 int32_t vid_fp; 558 int32_t vid_fp;
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
736 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 577 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
737} 578}
738 579
739#define BYT_BCLK_FREQS 5 580static int silvermont_get_scaling(void)
740static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
741
742static int byt_get_scaling(void)
743{ 581{
744 u64 value; 582 u64 value;
745 int i; 583 int i;
584 /* Defined in Table 35-6 from SDM (Sept 2015) */
585 static int silvermont_freq_table[] = {
586 83300, 100000, 133300, 116700, 80000};
746 587
747 rdmsrl(MSR_FSB_FREQ, value); 588 rdmsrl(MSR_FSB_FREQ, value);
748 i = value & 0x3; 589 i = value & 0x7;
590 WARN_ON(i > 4);
749 591
750 BUG_ON(i > BYT_BCLK_FREQS); 592 return silvermont_freq_table[i];
593}
751 594
752 return byt_freq_table[i] * 100; 595static int airmont_get_scaling(void)
596{
597 u64 value;
598 int i;
599 /* Defined in Table 35-10 from SDM (Sept 2015) */
600 static int airmont_freq_table[] = {
601 83300, 100000, 133300, 116700, 80000,
602 93300, 90000, 88900, 87500};
603
604 rdmsrl(MSR_FSB_FREQ, value);
605 i = value & 0xF;
606 WARN_ON(i > 8);
607
608 return airmont_freq_table[i];
753} 609}
754 610
755static void byt_get_vid(struct cpudata *cpudata) 611static void atom_get_vid(struct cpudata *cpudata)
756{ 612{
757 u64 value; 613 u64 value;
758 614
759 rdmsrl(BYT_VIDS, value); 615 rdmsrl(ATOM_VIDS, value);
760 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 616 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
761 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 617 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
762 cpudata->vid.ratio = div_fp( 618 cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
764 int_tofp(cpudata->pstate.max_pstate - 620 int_tofp(cpudata->pstate.max_pstate -
765 cpudata->pstate.min_pstate)); 621 cpudata->pstate.min_pstate));
766 622
767 rdmsrl(BYT_TURBO_VIDS, value); 623 rdmsrl(ATOM_TURBO_VIDS, value);
768 cpudata->vid.turbo = value & 0x7f; 624 cpudata->vid.turbo = value & 0x7f;
769} 625}
770 626
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
885 }, 741 },
886}; 742};
887 743
888static struct cpu_defaults byt_params = { 744static struct cpu_defaults silvermont_params = {
889 .pid_policy = { 745 .pid_policy = {
890 .sample_rate_ms = 10, 746 .sample_rate_ms = 10,
891 .deadband = 0, 747 .deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
895 .i_gain_pct = 4, 751 .i_gain_pct = 4,
896 }, 752 },
897 .funcs = { 753 .funcs = {
898 .get_max = byt_get_max_pstate, 754 .get_max = atom_get_max_pstate,
899 .get_max_physical = byt_get_max_pstate, 755 .get_max_physical = atom_get_max_pstate,
900 .get_min = byt_get_min_pstate, 756 .get_min = atom_get_min_pstate,
901 .get_turbo = byt_get_turbo_pstate, 757 .get_turbo = atom_get_turbo_pstate,
902 .set = byt_set_pstate, 758 .set = atom_set_pstate,
903 .get_scaling = byt_get_scaling, 759 .get_scaling = silvermont_get_scaling,
904 .get_vid = byt_get_vid, 760 .get_vid = atom_get_vid,
761 },
762};
763
764static struct cpu_defaults airmont_params = {
765 .pid_policy = {
766 .sample_rate_ms = 10,
767 .deadband = 0,
768 .setpoint = 60,
769 .p_gain_pct = 14,
770 .d_gain_pct = 0,
771 .i_gain_pct = 4,
772 },
773 .funcs = {
774 .get_max = atom_get_max_pstate,
775 .get_max_physical = atom_get_max_pstate,
776 .get_min = atom_get_min_pstate,
777 .get_turbo = atom_get_turbo_pstate,
778 .set = atom_set_pstate,
779 .get_scaling = airmont_get_scaling,
780 .get_vid = atom_get_vid,
905 }, 781 },
906}; 782};
907 783
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
938 * policy, or by cpu specific default values determined through 814 * policy, or by cpu specific default values determined through
939 * experimentation. 815 * experimentation.
940 */ 816 */
941 if (limits->max_perf_ctl && limits->max_sysfs_pct >= 817 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
942 limits->max_policy_pct) { 818 *max = clamp_t(int, max_perf_adj,
943 *max = limits->max_perf_ctl; 819 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
944 } else {
945 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
946 limits->max_perf));
947 *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
948 cpu->pstate.turbo_pstate);
949 }
950 820
951 if (limits->min_perf_ctl) { 821 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
952 *min = limits->min_perf_ctl; 822 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
953 } else {
954 min_perf = fp_toint(mul_fp(int_tofp(max_perf),
955 limits->min_perf));
956 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
957 }
958} 823}
959 824
960static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) 825static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
1153static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1018static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1154 ICPU(0x2a, core_params), 1019 ICPU(0x2a, core_params),
1155 ICPU(0x2d, core_params), 1020 ICPU(0x2d, core_params),
1156 ICPU(0x37, byt_params), 1021 ICPU(0x37, silvermont_params),
1157 ICPU(0x3a, core_params), 1022 ICPU(0x3a, core_params),
1158 ICPU(0x3c, core_params), 1023 ICPU(0x3c, core_params),
1159 ICPU(0x3d, core_params), 1024 ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1162 ICPU(0x45, core_params), 1027 ICPU(0x45, core_params),
1163 ICPU(0x46, core_params), 1028 ICPU(0x46, core_params),
1164 ICPU(0x47, core_params), 1029 ICPU(0x47, core_params),
1165 ICPU(0x4c, byt_params), 1030 ICPU(0x4c, airmont_params),
1166 ICPU(0x4e, core_params), 1031 ICPU(0x4e, core_params),
1167 ICPU(0x4f, core_params), 1032 ICPU(0x4f, core_params),
1168 ICPU(0x5e, core_params), 1033 ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
1229 1094
1230static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1095static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1231{ 1096{
1232#if IS_ENABLED(CONFIG_ACPI)
1233 struct cpudata *cpu;
1234 int i;
1235#endif
1236 pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
1237 policy->cpuinfo.max_freq, policy->max);
1238 if (!policy->cpuinfo.max_freq) 1097 if (!policy->cpuinfo.max_freq)
1239 return -ENODEV; 1098 return -ENODEV;
1240 1099
@@ -1242,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1242 policy->max >= policy->cpuinfo.max_freq) { 1101 policy->max >= policy->cpuinfo.max_freq) {
1243 pr_debug("intel_pstate: set performance\n"); 1102 pr_debug("intel_pstate: set performance\n");
1244 limits = &performance_limits; 1103 limits = &performance_limits;
1104 if (hwp_active)
1105 intel_pstate_hwp_set();
1245 return 0; 1106 return 0;
1246 } 1107 }
1247 1108
@@ -1249,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1249 limits = &powersave_limits; 1110 limits = &powersave_limits;
1250 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1111 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1251 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); 1112 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
1252 limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; 1113 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
1114 policy->cpuinfo.max_freq);
1253 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); 1115 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
1254 1116
1255 /* Normalize user input to [min_policy_pct, max_policy_pct] */ 1117 /* Normalize user input to [min_policy_pct, max_policy_pct] */
@@ -1261,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1261 limits->max_sysfs_pct); 1123 limits->max_sysfs_pct);
1262 limits->max_perf_pct = max(limits->min_policy_pct, 1124 limits->max_perf_pct = max(limits->min_policy_pct,
1263 limits->max_perf_pct); 1125 limits->max_perf_pct);
1126 limits->max_perf = round_up(limits->max_perf, 8);
1264 1127
1265 /* Make sure min_perf_pct <= max_perf_pct */ 1128 /* Make sure min_perf_pct <= max_perf_pct */
1266 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1129 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
@@ -1270,23 +1133,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1270 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1133 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1271 int_tofp(100)); 1134 int_tofp(100));
1272 1135
1273#if IS_ENABLED(CONFIG_ACPI)
1274 cpu = all_cpu_data[policy->cpu];
1275 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
1276 int control;
1277
1278 control = convert_to_native_pstate_format(cpu, i);
1279 if (control * cpu->pstate.scaling == policy->max)
1280 limits->max_perf_ctl = control;
1281 if (control * cpu->pstate.scaling == policy->min)
1282 limits->min_perf_ctl = control;
1283 }
1284
1285 pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
1286 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
1287 limits->max_perf_ctl);
1288#endif
1289
1290 if (hwp_active) 1136 if (hwp_active)
1291 intel_pstate_hwp_set(); 1137 intel_pstate_hwp_set();
1292 1138
@@ -1341,30 +1187,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1341 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1187 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1342 policy->cpuinfo.max_freq = 1188 policy->cpuinfo.max_freq =
1343 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1189 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1344 if (!no_acpi_perf)
1345 intel_pstate_init_perf_limits(policy);
1346 /*
1347 * If there is no acpi perf data or error, we ignore and use Intel P
1348 * state calculated limits, So this is not fatal error.
1349 */
1350 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1190 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1351 cpumask_set_cpu(policy->cpu, policy->cpus); 1191 cpumask_set_cpu(policy->cpu, policy->cpus);
1352 1192
1353 return 0; 1193 return 0;
1354} 1194}
1355 1195
1356static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
1357{
1358 return intel_pstate_exit_perf_limits(policy);
1359}
1360
1361static struct cpufreq_driver intel_pstate_driver = { 1196static struct cpufreq_driver intel_pstate_driver = {
1362 .flags = CPUFREQ_CONST_LOOPS, 1197 .flags = CPUFREQ_CONST_LOOPS,
1363 .verify = intel_pstate_verify_policy, 1198 .verify = intel_pstate_verify_policy,
1364 .setpolicy = intel_pstate_set_policy, 1199 .setpolicy = intel_pstate_set_policy,
1365 .get = intel_pstate_get, 1200 .get = intel_pstate_get,
1366 .init = intel_pstate_cpu_init, 1201 .init = intel_pstate_cpu_init,
1367 .exit = intel_pstate_cpu_exit,
1368 .stop_cpu = intel_pstate_stop_cpu, 1202 .stop_cpu = intel_pstate_stop_cpu,
1369 .name = "intel_pstate", 1203 .name = "intel_pstate",
1370}; 1204};
@@ -1406,6 +1240,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
1406} 1240}
1407 1241
1408#if IS_ENABLED(CONFIG_ACPI) 1242#if IS_ENABLED(CONFIG_ACPI)
1243#include <acpi/processor.h>
1409 1244
1410static bool intel_pstate_no_acpi_pss(void) 1245static bool intel_pstate_no_acpi_pss(void)
1411{ 1246{
@@ -1601,9 +1436,6 @@ static int __init intel_pstate_setup(char *str)
1601 force_load = 1; 1436 force_load = 1;
1602 if (!strcmp(str, "hwp_only")) 1437 if (!strcmp(str, "hwp_only"))
1603 hwp_only = 1; 1438 hwp_only = 1;
1604 if (!strcmp(str, "no_acpi"))
1605 no_acpi_perf = 1;
1606
1607 return 0; 1439 return 0;
1608} 1440}
1609early_param("intel_pstate", intel_pstate_setup); 1441early_param("intel_pstate", intel_pstate_setup);
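One detail of the intel_pstate hunk worth calling out: max_policy_pct now rounds up via DIV_ROUND_UP, so a policy->max that is not a whole percent of cpuinfo.max_freq no longer truncates to a cap below the requested frequency. A worked example with illustrative numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* cpuinfo.max_freq = 3300000 kHz, policy->max = 3200000 kHz */
            int down = (3200000 * 100) / 3300000;            /* 96 */
            int up = DIV_ROUND_UP(3200000 * 100, 3300000);   /* 97 */

            printf("truncated: %d%%, rounded up: %d%%\n", down, up);
            return 0;
    }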
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 73ef49922788..7038f364acb5 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
409 processed += to_process; 409 processed += to_process;
410 } while (processed < nbytes); 410 } while (processed < nbytes);
411 411
412 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, 412 rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
413 authsize) ? -EBADMSG : 0; 413 authsize) ? -EBADMSG : 0;
414out: 414out:
415 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 415 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index eee624f589b6..abd465f479c4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,6 +21,7 @@
21 21
22#include <crypto/internal/aead.h> 22#include <crypto/internal/aead.h>
23#include <crypto/aes.h> 23#include <crypto/aes.h>
24#include <crypto/algapi.h>
24#include <crypto/scatterwalk.h> 25#include <crypto/scatterwalk.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/types.h> 27#include <linux/types.h>
@@ -418,7 +419,7 @@ mac:
418 itag, req->src, req->assoclen + nbytes, 419 itag, req->src, req->assoclen + nbytes,
419 crypto_aead_authsize(crypto_aead_reqtfm(req)), 420 crypto_aead_authsize(crypto_aead_reqtfm(req)),
420 SCATTERWALK_FROM_SG); 421 SCATTERWALK_FROM_SG);
421 rc = memcmp(itag, otag, 422 rc = crypto_memneq(itag, otag,
422 crypto_aead_authsize(crypto_aead_reqtfm(req))) ? 423 crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
423 -EBADMSG : 0; 424 -EBADMSG : 0;
424 } 425 }
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 03856ad280b9..473d36d91644 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
198 goto out_err; 198 goto out_err;
199 } 199 }
200 200
201 params_head = section_head->params; 201 params_head = section.params;
202 202
203 while (params_head) { 203 while (params_head) {
204 if (copy_from_user(&key_val, (void __user *)params_head, 204 if (copy_from_user(&key_val, (void __user *)params_head,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 46f531e19ccf..b6f9f42e2985 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
977 } else 977 } else
978 oicv = (char *)&edesc->link_tbl[0]; 978 oicv = (char *)&edesc->link_tbl[0];
979 979
980 err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0; 980 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
981 } 981 }
982 982
983 kfree(edesc); 983 kfree(edesc);
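The three crypto hunks above all swap memcmp() for crypto_memneq() when checking authentication tags: memcmp() returns at the first mismatching byte, so its timing reveals how much of a forged tag was correct. Below is a simplified, self-contained version of a constant-time comparison; the kernel's real crypto_memneq is more elaborate.

    #include <stddef.h>

    /* Returns 0 iff the two buffers are equal; examines every byte so
     * the run time does not depend on where a mismatch occurs. */
    static int memneq_ct(const void *a, const void *b, size_t n)
    {
            const unsigned char *pa = a, *pb = b;
            unsigned char diff = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    diff |= pa[i] ^ pb[i];  /* no early exit */
            return diff;
    }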
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 4e55239c7a30..53d22eb73b56 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
729 return NULL; 729 return NULL;
730 730
731 dev_info(chan2dev(chan), 731 dev_info(chan2dev(chan),
732 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 732 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
733 __func__, xt->src_start, xt->dst_start, xt->numf, 733 __func__, &xt->src_start, &xt->dst_start, xt->numf,
734 xt->frame_size, flags); 734 xt->frame_size, flags);
735 735
736 /* 736 /*
@@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
824 u32 ctrla; 824 u32 ctrla;
825 u32 ctrlb; 825 u32 ctrlb;
826 826
827 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 827 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
828 dest, src, len, flags); 828 &dest, &src, len, flags);
829 829
830 if (unlikely(!len)) { 830 if (unlikely(!len)) {
831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
938 void __iomem *vaddr; 938 void __iomem *vaddr;
939 dma_addr_t paddr; 939 dma_addr_t paddr;
940 940
941 dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, 941 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
942 dest, value, len, flags); 942 &dest, value, len, flags);
943 943
944 if (unlikely(!len)) { 944 if (unlikely(!len)) {
945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
1022 dma_addr_t dest = sg_dma_address(sg); 1022 dma_addr_t dest = sg_dma_address(sg);
1023 size_t len = sg_dma_len(sg); 1023 size_t len = sg_dma_len(sg);
1024 1024
1025 dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", 1025 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1026 __func__, dest, len); 1026 __func__, &dest, len);
1027 1027
1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { 1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n", 1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1439 unsigned int periods = buf_len / period_len; 1439 unsigned int periods = buf_len / period_len;
1440 unsigned int i; 1440 unsigned int i;
1441 1441
1442 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 1442 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1443 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 1443 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1444 buf_addr, 1444 &buf_addr,
1445 periods, buf_len, period_len); 1445 periods, buf_len, period_len);
1446 1446
1447 if (unlikely(!atslave || !buf_len || !period_len)) { 1447 if (unlikely(!atslave || !buf_len || !period_len)) {
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d1cfc8c876f9..7f58f06157f6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
385static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) 385static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
386{ 386{
387 dev_crit(chan2dev(&atchan->chan_common), 387 dev_crit(chan2dev(&atchan->chan_common),
388 " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 388 " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
389 lli->saddr, lli->daddr, 389 &lli->saddr, &lli->daddr,
390 lli->ctrla, lli->ctrlb, lli->dscr); 390 lli->ctrla, lli->ctrlb, &lli->dscr);
391} 391}
392 392
393 393
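The at_hdmac conversions rely on the kernel's %pad printk extension: dma_addr_t changes width with the configuration, so the value is passed by address and the printk core emits the right number of digits. A usage sketch; log_desc is a hypothetical helper, not driver code.

    #include <linux/device.h>
    #include <linux/types.h>

    /* %pad takes a pointer to the dma_addr_t, never the value itself. */
    static void log_desc(struct device *dev, dma_addr_t src, dma_addr_t dst,
                         size_t len)
    {
            dev_dbg(dev, "desc: s%pad d%pad l0x%zx\n", &src, &dst, len);
    }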
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d4bae5..7f039de143f0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
920 desc->lld.mbr_cfg = chan_cc; 920 desc->lld.mbr_cfg = chan_cc;
921 921
922 dev_dbg(chan2dev(chan), 922 dev_dbg(chan2dev(chan),
923 "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 923 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
924 __func__, desc->lld.mbr_sa, desc->lld.mbr_da, 924 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
925 desc->lld.mbr_ubc, desc->lld.mbr_cfg); 925 desc->lld.mbr_ubc, desc->lld.mbr_cfg);
926 926
927 /* Chain lld. */ 927 /* Chain lld. */
@@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
953 if ((xt->numf > 1) && (xt->frame_size > 1)) 953 if ((xt->numf > 1) && (xt->frame_size > 1))
954 return NULL; 954 return NULL;
955 955
956 dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 956 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
957 __func__, xt->src_start, xt->dst_start, xt->numf, 957 __func__, &xt->src_start, &xt->dst_start, xt->numf,
958 xt->frame_size, flags); 958 xt->frame_size, flags);
959 959
960 src_addr = xt->src_start; 960 src_addr = xt->src_start;
@@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1179 desc->lld.mbr_cfg = chan_cc; 1179 desc->lld.mbr_cfg = chan_cc;
1180 1180
1181 dev_dbg(chan2dev(chan), 1181 dev_dbg(chan2dev(chan),
1182 "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 1182 "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1183 __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, 1183 __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
1184 desc->lld.mbr_cfg); 1184 desc->lld.mbr_cfg);
1185 1185
1186 return desc; 1186 return desc;
@@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1193 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1193 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1194 struct at_xdmac_desc *desc; 1194 struct at_xdmac_desc *desc;
1195 1195
1196 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1196 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1197 __func__, dest, len, value, flags); 1197 __func__, &dest, len, value, flags);
1198 1198
1199 if (unlikely(!len)) 1199 if (unlikely(!len))
1200 return NULL; 1200 return NULL;
@@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1229 1229
1230 /* Prepare descriptors. */ 1230 /* Prepare descriptors. */
1231 for_each_sg(sgl, sg, sg_len, i) { 1231 for_each_sg(sgl, sg, sg_len, i) {
1232 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1232 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1233 __func__, sg_dma_address(sg), sg_dma_len(sg), 1233 __func__, &sg_dma_address(sg), sg_dma_len(sg),
1234 value, flags); 1234 value, flags);
1235 desc = at_xdmac_memset_create_desc(chan, atchan, 1235 desc = at_xdmac_memset_create_desc(chan, atchan,
1236 sg_dma_address(sg), 1236 sg_dma_address(sg),
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e84e6b..0675e268d577 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -107,7 +107,7 @@
107 107
108/* CCCFG register */ 108/* CCCFG register */
109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ 109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
110#define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ 110#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ 111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ 112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
@@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
1565 struct platform_device *tc_pdev; 1565 struct platform_device *tc_pdev;
1566 int ret; 1566 int ret;
1567 1567
1568 if (!tc) 1568 if (!IS_ENABLED(CONFIG_OF) || !tc)
1569 return; 1569 return;
1570 1570
1571 tc_pdev = of_find_device_by_node(tc->node); 1571 tc_pdev = of_find_device_by_node(tc->node);
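The GET_NUM_QDMACH fix in edma.c is a pure precedence bug: >> binds tighter than &, so `x & 0x70 >> 4` parses as `x & (0x70 >> 4)`, i.e. `x & 0x7`, and reads the wrong bits. A tiny demonstration:

    #include <stdio.h>

    #define BAD(x)  (x & 0x70 >> 4)         /* == x & (0x70 >> 4) == x & 0x7 */
    #define GOOD(x) ((x & 0x70) >> 4)

    int main(void)
    {
            unsigned int cccfg = 0x30;      /* 3 QDMA channels in bits 4-6 */

            printf("bad=%u good=%u\n", BAD(cccfg), GOOD(cccfg));
            /* prints: bad=0 good=3 */
            return 0;
    }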
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7058d58ba588..0f6fd42f55ca 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@ err_firmware:
1462 1462
1463#define EVENT_REMAP_CELLS 3 1463#define EVENT_REMAP_CELLS 3
1464 1464
1465static int __init sdma_event_remap(struct sdma_engine *sdma) 1465static int sdma_event_remap(struct sdma_engine *sdma)
1466{ 1466{
1467 struct device_node *np = sdma->dev->of_node; 1467 struct device_node *np = sdma->dev->of_node;
1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); 1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f398b0..f1bcc2a163b3 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev)
679 struct usb_dmac *dmac = dev_get_drvdata(dev); 679 struct usb_dmac *dmac = dev_get_drvdata(dev);
680 int i; 680 int i;
681 681
682 for (i = 0; i < dmac->n_channels; ++i) 682 for (i = 0; i < dmac->n_channels; ++i) {
683 if (!dmac->channels[i].iomem)
684 break;
683 usb_dmac_chan_halt(&dmac->channels[i]); 685 usb_dmac_chan_halt(&dmac->channels[i]);
686 }
684 687
685 return 0; 688 return 0;
686} 689}
@@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev)
799 ret = pm_runtime_get_sync(&pdev->dev); 802 ret = pm_runtime_get_sync(&pdev->dev);
800 if (ret < 0) { 803 if (ret < 0) {
801 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 804 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
802 return ret; 805 goto error_pm;
803 } 806 }
804 807
805 ret = usb_dmac_init(dmac); 808 ret = usb_dmac_init(dmac);
806 pm_runtime_put(&pdev->dev);
807 809
808 if (ret) { 810 if (ret) {
809 dev_err(&pdev->dev, "failed to reset device\n"); 811 dev_err(&pdev->dev, "failed to reset device\n");
@@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev)
851 if (ret < 0) 853 if (ret < 0)
852 goto error; 854 goto error;
853 855
856 pm_runtime_put(&pdev->dev);
854 return 0; 857 return 0;
855 858
856error: 859error:
857 of_dma_controller_free(pdev->dev.of_node); 860 of_dma_controller_free(pdev->dev.of_node);
861 pm_runtime_put(&pdev->dev);
862error_pm:
858 pm_runtime_disable(&pdev->dev); 863 pm_runtime_disable(&pdev->dev);
859 return ret; 864 return ret;
860} 865}
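The usb-dmac probe change keeps the runtime-PM reference held across the whole init sequence and releases it on every exit path, instead of dropping it right after usb_dmac_init(). A hedged sketch of that shape; the example_* names are illustrative.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int example_hw_init(struct device *dev)
    {
            return 0;       /* stand-in for work needing the device powered */
    }

    static int example_probe_body(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* a failed get_sync still bumps the usage count */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            ret = example_hw_init(dev);
            if (ret)
                    goto err_put;

            pm_runtime_put(dev);    /* dropped only after init completes */
            return 0;

    err_put:
            pm_runtime_put(dev);
            return ret;
    }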
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 6ed7c0fb3378..6b186829087c 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
113 113
114static int mmio_74xx_gpio_probe(struct platform_device *pdev) 114static int mmio_74xx_gpio_probe(struct platform_device *pdev)
115{ 115{
116 const struct of_device_id *of_id = 116 const struct of_device_id *of_id;
117 of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
118 struct mmio_74xx_gpio_priv *priv; 117 struct mmio_74xx_gpio_priv *priv;
119 struct resource *res; 118 struct resource *res;
120 void __iomem *dat; 119 void __iomem *dat;
121 int err; 120 int err;
122 121
122 of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
123 if (!of_id)
124 return -ENODEV;
125
123 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 126 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
124 if (!priv) 127 if (!priv)
125 return -ENOMEM; 128 return -ENOMEM;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 56d2d026e62e..f7fbb46d5d79 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1122 /* MPUIO is a bit different, reading IRQ status clears it */ 1122 /* MPUIO is a bit different, reading IRQ status clears it */
1123 if (bank->is_mpuio) { 1123 if (bank->is_mpuio) {
1124 irqc->irq_ack = dummy_irq_chip.irq_ack; 1124 irqc->irq_ack = dummy_irq_chip.irq_ack;
1125 irqc->irq_mask = irq_gc_mask_set_bit;
1126 irqc->irq_unmask = irq_gc_mask_clr_bit;
1127 if (!bank->regs->wkup_en) 1125 if (!bank->regs->wkup_en)
1128 irqc->irq_set_wake = NULL; 1126 irqc->irq_set_wake = NULL;
1129 } 1127 }
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 171a6389f9ce..52b447c071cb 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
167 const struct palmas_device_data *dev_data; 167 const struct palmas_device_data *dev_data;
168 168
169 match = of_match_device(of_palmas_gpio_match, &pdev->dev); 169 match = of_match_device(of_palmas_gpio_match, &pdev->dev);
170 if (!match)
171 return -ENODEV;
170 dev_data = match->data; 172 dev_data = match->data;
171 if (!dev_data) 173 if (!dev_data)
172 dev_data = &palmas_dev_data; 174 dev_data = &palmas_dev_data;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 045a952576c7..7b25fdf64802 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
187static int syscon_gpio_probe(struct platform_device *pdev) 187static int syscon_gpio_probe(struct platform_device *pdev)
188{ 188{
189 struct device *dev = &pdev->dev; 189 struct device *dev = &pdev->dev;
190 const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev); 190 const struct of_device_id *of_id;
191 struct syscon_gpio_priv *priv; 191 struct syscon_gpio_priv *priv;
192 struct device_node *np = dev->of_node; 192 struct device_node *np = dev->of_node;
193 int ret; 193 int ret;
194 194
195 of_id = of_match_device(syscon_gpio_ids, dev);
196 if (!of_id)
197 return -ENODEV;
198
195 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 199 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
196 if (!priv) 200 if (!priv)
197 return -ENOMEM; 201 return -ENOMEM;
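The GPIO probe fixes above (74xx-mmio, palmas, syscon) share one pattern: of_match_device() may return NULL, for instance when the device was bound without a DT match, so its result must be checked before the match data is dereferenced. A generic sketch with illustrative names:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static const struct of_device_id example_of_ids[] = {
            { .compatible = "vendor,example" },
            { /* sentinel */ }
    };

    static int example_probe(struct platform_device *pdev)
    {
            const struct of_device_id *of_id;

            of_id = of_match_device(example_of_ids, &pdev->dev);
            if (!of_id)
                    return -ENODEV; /* bound without DT match data */

            /* of_id->data is safe to dereference from here on */
            return 0;
    }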
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 027e5f47dd28..896bf29776b0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 }
 #endif
 
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_gpio_show(struct seq_file *s, void *unused)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < tegra_gpio_bank_count; i++) {
+		for (j = 0; j < 4; j++) {
+			int gpio = tegra_gpio_compose(i, j, 0);
+			seq_printf(s,
+				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+				i, j,
+				tegra_gpio_readl(GPIO_CNF(gpio)),
+				tegra_gpio_readl(GPIO_OE(gpio)),
+				tegra_gpio_readl(GPIO_OUT(gpio)),
+				tegra_gpio_readl(GPIO_IN(gpio)),
+				tegra_gpio_readl(GPIO_INT_STA(gpio)),
+				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
+				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+		}
+	}
+	return 0;
+}
+
+static int dbg_gpio_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_gpio_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+	.open		= dbg_gpio_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void tegra_gpio_debuginit(void)
+{
+	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
+					NULL, NULL, &debug_fops);
+}
+
+#else
+
+static inline void tegra_gpio_debuginit(void)
+{
+}
+
+#endif
+
 static struct irq_chip tegra_gpio_irq_chip = {
 	.name		= "GPIO",
 	.irq_ack	= tegra_gpio_irq_ack,
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 		spin_lock_init(&bank->lvl_lock[j]);
 	}
 
+	tegra_gpio_debuginit();
+
 	return 0;
 }
 
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void)
 	return platform_driver_register(&tegra_gpio_driver);
 }
 postcore_initcall(tegra_gpio_init);
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
-	int i;
-	int j;
-
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
-		for (j = 0; j < 4; j++) {
-			int gpio = tegra_gpio_compose(i, j, 0);
-			seq_printf(s,
-				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
-				i, j,
-				tegra_gpio_readl(GPIO_CNF(gpio)),
-				tegra_gpio_readl(GPIO_OE(gpio)),
-				tegra_gpio_readl(GPIO_OUT(gpio)),
-				tegra_gpio_readl(GPIO_IN(gpio)),
-				tegra_gpio_readl(GPIO_INT_STA(gpio)),
-				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
-				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
-		}
-	}
-	return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
-	.open		= dbg_gpio_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init tegra_gpio_debuginit(void)
-{
-	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
-					NULL, NULL, &debug_fops);
-	return 0;
-}
-late_initcall(tegra_gpio_debuginit);
-#endif
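
Note: the gpio-tegra change above is a move rather than a rewrite: the debugfs dump code is hoisted above its first user, and registration switches from a late_initcall() to a direct tegra_gpio_debuginit() call at the end of probe, so the file only appears once the device has actually probed. The underlying debugfs/seq_file idiom, sketched with hypothetical names:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    /* One-shot dump: single_open() wires a show() callback that
     * renders the whole file content on each read. */
    static int foo_show(struct seq_file *s, void *unused)
    {
    	seq_puts(s, "state dump goes here\n");
    	return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, foo_show, inode->i_private);
    }

    static const struct file_operations foo_debug_fops = {
    	.open		= foo_open,
    	.read		= seq_read,
    	.llseek		= seq_lseek,
    	.release	= single_release,
    };

    /* called from probe, once the hardware is known to be present */
    static void foo_debuginit(void)
    {
    	debugfs_create_file("foo", S_IRUGO, NULL, NULL, &foo_debug_fops);
    }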
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a18f00fc1bb8..2a91f3287e3b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
 	for (i = 0; i != chip->ngpio; ++i) {
 		struct gpio_desc *gpio = &chip->desc[i];
 
-		if (!gpio->name)
+		if (!gpio->name || !name)
 			continue;
 
 		if (!strcmp(gpio->name, name)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 615ce6d464fb..5a5f04d0902d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -389,7 +389,6 @@ struct amdgpu_clock {
  * Fences.
  */
 struct amdgpu_fence_driver {
-	struct amdgpu_ring		*ring;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
-	struct delayed_work		lockup_work;
+	struct timer_list		fallback_timer;
 	wait_queue_head_t		fence_queue;
 };
 
@@ -497,6 +496,7 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
+	struct mutex			mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence			*last_pt_update;
@@ -539,6 +539,7 @@ struct amdgpu_bo {
 	/* Constant after initialization */
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
+	struct amdgpu_bo		*parent;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	pid_t				pid;
@@ -917,8 +918,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo	*bo;
-	uint64_t		addr;
+	struct amdgpu_bo		*bo;
+	uint64_t			addr;
 };
 
 struct amdgpu_vm_id {
@@ -926,13 +927,9 @@ struct amdgpu_vm_id {
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
-	/* last use of vmid */
-	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
-	struct mutex		mutex;
-
 	struct rb_root		va;
 
 	/* protecting invalidated */
@@ -957,24 +954,72 @@ struct amdgpu_vm {
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	/* for interval tree */
+	spinlock_t		it_lock;
+	/* protecting freed */
+	spinlock_t		freed_lock;
 };
 
 struct amdgpu_vm_manager {
-	struct fence			*active[AMDGPU_NUM_VM];
-	uint32_t			max_pfn;
+	struct {
+		struct fence		*active;
+		atomic_long_t		owner;
+	} ids[AMDGPU_NUM_VM];
+
+	uint32_t			max_pfn;
 	/* number of VMIDs */
 	unsigned			nvm;
 	/* vram base address for page table entry */
 	u64				vram_base_offset;
 	/* is vm enabled? */
 	bool				enabled;
-	/* for hw to save the PD addr on suspend/resume */
-	uint32_t			saved_table_addr[AMDGPU_NUM_VM];
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
 	struct amdgpu_ring			*vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					       struct amdgpu_vm *vm,
+					       struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
 /*
  * context related structures
  */
@@ -1211,6 +1256,7 @@ struct amdgpu_cs_parser {
 	/* relocations */
 	struct amdgpu_bo_list_entry	*vm_bos;
 	struct list_head	validated;
+	struct fence		*fence;
 
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
@@ -1226,7 +1272,7 @@ struct amdgpu_job {
 	struct amdgpu_device	*adev;
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
-	struct mutex            job_lock;
+	void			*owner;
 	struct amdgpu_user_fence uf;
 	int (*free_job)(struct amdgpu_job *job);
 };
@@ -2257,11 +2303,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2319,49 +2360,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
 /*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-					       struct amdgpu_vm *vm,
-					       struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
-/*
  * functions used by amdgpu_encoder.c
  */
 struct amdgpu_afmt_acr {
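
Note: the header changes above track a locking rework: the coarse per-VM mutex disappears in favour of a per-bo_va mutex plus two spinlocks named for exactly what they protect (the interval tree and the freed list), and the VM API prototypes move next to the structures they operate on. A rough sketch of the fine-grained-locking shape, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct foo_vm {
    	spinlock_t it_lock;		/* protects the interval tree */
    	spinlock_t freed_lock;		/* protects the freed list */
    	struct list_head freed;
    };

    static void foo_vm_queue_freed(struct foo_vm *vm, struct list_head *entry)
    {
    	/* writers only contend on freed_lock, never on unrelated
    	 * interval-tree updates guarded by it_lock */
    	spin_lock(&vm->freed_lock);
    	list_add_tail(entry, &vm->freed);
    	spin_unlock(&vm->freed_lock);
    }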
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02c7a38..4f352ec9dec4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs)
-{
-	struct amdgpu_cs_parser *parser;
-	int i;
-
-	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
-	if (!parser)
-		return NULL;
-
-	parser->adev = adev;
-	parser->filp = filp;
-	parser->ctx = ctx;
-	parser->ibs = ibs;
-	parser->num_ibs = num_ibs;
-	for (i = 0; i < num_ibs; i++)
-		ibs[i].ctx = ctx;
-
-	return parser;
-}
-
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -246,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		}
 
 		p->uf.bo = gem_to_amdgpu_bo(gobj);
+		amdgpu_bo_ref(p->uf.bo);
+		drm_gem_object_unreference_unlocked(gobj);
 		p->uf.offset = fence_data->offset;
 	} else {
 		ret = -EINVAL;
@@ -463,8 +441,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+	unsigned i;
+
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -479,17 +467,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    &parser->ibs[parser->num_ibs-1].fence->base);
+					    parser->fence);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-}
+	fence_put(parser->fence);
 
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
-	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
@@ -499,31 +484,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (!amdgpu_enable_scheduler)
-	{
-		if (parser->ibs)
-			for (i = 0; i < parser->num_ibs; i++)
-				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-		kfree(parser->ibs);
-		if (parser->uf.bo)
-			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-	}
-
-	kfree(parser);
-}
-
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-	amdgpu_cs_parser_fini_early(parser, error, backoff);
-	amdgpu_cs_parser_fini_late(parser);
+	if (parser->ibs)
+		for (i = 0; i < parser->num_ibs; i++)
+			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+	kfree(parser->ibs);
+	if (parser->uf.bo)
+		amdgpu_bo_unref(&parser->uf.bo);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +576,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 	}
 
 	r = amdgpu_bo_vm_update_pte(parser, vm);
-	if (r) {
-		goto out;
-	}
-	amdgpu_cs_sync_rings(parser);
-	if (!amdgpu_enable_scheduler)
-		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-				       parser->filp);
+	if (!r)
+		amdgpu_cs_sync_rings(parser);
 
-out:
 	return r;
 }
 
@@ -818,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
 		amdgpu_ib_free(job->adev, &job->ibs[i]);
 	kfree(job->ibs);
 	if (job->uf.bo)
-		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+		amdgpu_bo_unref(&job->uf.bo);
 	return 0;
 }
 
824 784
@@ -826,38 +786,35 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
826{ 786{
827 struct amdgpu_device *adev = dev->dev_private; 787 struct amdgpu_device *adev = dev->dev_private;
828 union drm_amdgpu_cs *cs = data; 788 union drm_amdgpu_cs *cs = data;
829 struct amdgpu_fpriv *fpriv = filp->driver_priv; 789 struct amdgpu_cs_parser parser = {};
830 struct amdgpu_vm *vm = &fpriv->vm;
831 struct amdgpu_cs_parser *parser;
832 bool reserved_buffers = false; 790 bool reserved_buffers = false;
833 int i, r; 791 int i, r;
834 792
835 if (!adev->accel_working) 793 if (!adev->accel_working)
836 return -EBUSY; 794 return -EBUSY;
837 795
838 parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); 796 parser.adev = adev;
839 if (!parser) 797 parser.filp = filp;
840 return -ENOMEM; 798
841 r = amdgpu_cs_parser_init(parser, data); 799 r = amdgpu_cs_parser_init(&parser, data);
842 if (r) { 800 if (r) {
843 DRM_ERROR("Failed to initialize parser !\n"); 801 DRM_ERROR("Failed to initialize parser !\n");
844 amdgpu_cs_parser_fini(parser, r, false); 802 amdgpu_cs_parser_fini(&parser, r, false);
845 r = amdgpu_cs_handle_lockup(adev, r); 803 r = amdgpu_cs_handle_lockup(adev, r);
846 return r; 804 return r;
847 } 805 }
848 mutex_lock(&vm->mutex); 806 r = amdgpu_cs_parser_relocs(&parser);
849 r = amdgpu_cs_parser_relocs(parser);
850 if (r == -ENOMEM) 807 if (r == -ENOMEM)
851 DRM_ERROR("Not enough memory for command submission!\n"); 808 DRM_ERROR("Not enough memory for command submission!\n");
852 else if (r && r != -ERESTARTSYS) 809 else if (r && r != -ERESTARTSYS)
853 DRM_ERROR("Failed to process the buffer list %d!\n", r); 810 DRM_ERROR("Failed to process the buffer list %d!\n", r);
854 else if (!r) { 811 else if (!r) {
855 reserved_buffers = true; 812 reserved_buffers = true;
856 r = amdgpu_cs_ib_fill(adev, parser); 813 r = amdgpu_cs_ib_fill(adev, &parser);
857 } 814 }
858 815
859 if (!r) { 816 if (!r) {
860 r = amdgpu_cs_dependencies(adev, parser); 817 r = amdgpu_cs_dependencies(adev, &parser);
861 if (r) 818 if (r)
862 DRM_ERROR("Failed in the dependencies handling %d!\n", r); 819 DRM_ERROR("Failed in the dependencies handling %d!\n", r);
863 } 820 }
@@ -865,63 +822,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r)
 		goto out;
 
-	for (i = 0; i < parser->num_ibs; i++)
-		trace_amdgpu_cs(parser, i);
+	for (i = 0; i < parser.num_ibs; i++)
+		trace_amdgpu_cs(&parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser->num_ibs) {
+	if (amdgpu_enable_scheduler && parser.num_ibs) {
+		struct amdgpu_ring * ring = parser.ibs->ring;
+		struct amd_sched_fence *fence;
 		struct amdgpu_job *job;
-		struct amdgpu_ring * ring = parser->ibs->ring;
+
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
 			r = -ENOMEM;
 			goto out;
 		}
+
 		job->base.sched = &ring->sched;
-		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
-		job->adev = parser->adev;
-		job->ibs = parser->ibs;
-		job->num_ibs = parser->num_ibs;
-		job->base.owner = parser->filp;
-		mutex_init(&job->job_lock);
+		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+		job->adev = parser.adev;
+		job->owner = parser.filp;
+		job->free_job = amdgpu_cs_free_job;
+
+		job->ibs = parser.ibs;
+		job->num_ibs = parser.num_ibs;
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
+
 		if (job->ibs[job->num_ibs - 1].user) {
-			memcpy(&job->uf, &parser->uf,
-			       sizeof(struct amdgpu_user_fence));
+			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
+			parser.uf.bo = NULL;
 		}
 
-		job->free_job = amdgpu_cs_free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
+		fence = amd_sched_fence_create(job->base.s_entity,
+					       parser.filp);
+		if (!fence) {
+			r = -ENOMEM;
 			amdgpu_cs_free_job(job);
 			kfree(job);
 			goto out;
 		}
-		cs->out.handle =
-			amdgpu_ctx_add_fence(parser->ctx, ring,
-					     &job->base.s_fence->base);
-		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+		job->base.s_fence = fence;
+		parser.fence = fence_get(&fence->base);
 
-		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser->ticket,
-				&parser->validated,
-				&job->base.s_fence->base);
+		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+						      &fence->base);
+		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		mutex_unlock(&job->job_lock);
-		amdgpu_cs_parser_fini_late(parser);
-		mutex_unlock(&vm->mutex);
-		return 0;
+		trace_amdgpu_cs_ioctl(job);
+		amd_sched_entity_push_job(&job->base);
+
+	} else {
+		struct amdgpu_fence *fence;
+
+		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+				       parser.filp);
+		fence = parser.ibs[parser.num_ibs - 1].fence;
+		parser.fence = fence_get(&fence->base);
+		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 	}
 
-	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-	mutex_unlock(&vm->mutex);
+	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
 }
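
Note: the ioctl rework above replaces a kzalloc'd parser object (and its split _early/_late teardown) with an on-stack struct amdgpu_cs_parser parser = {} whose lifetime exactly matches the ioctl, torn down on every exit path through the single amdgpu_cs_parser_fini(). The shape of that pattern, sketched with hypothetical helpers:

    #include <linux/slab.h>

    struct foo_ctx {
    	void *ibs;
    };

    static int foo_ctx_init(struct foo_ctx *ctx, void *data) { return 0; }
    static int foo_ctx_run(struct foo_ctx *ctx) { return 0; }

    static void foo_ctx_fini(struct foo_ctx *ctx, int error)
    {
    	kfree(ctx->ibs);		/* kfree(NULL) is a no-op */
    }

    static int foo_ioctl(void *data)
    {
    	struct foo_ctx ctx = {};	/* zero-initialized on the stack */
    	int r;

    	r = foo_ctx_init(&ctx, data);
    	if (r)
    		goto out;
    	r = foo_ctx_run(&ctx);
    out:
    	foo_ctx_fini(&ctx, r);		/* one teardown for all paths */
    	return r;
    }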
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e173a5a02f0d..acd066d0a805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	unsigned long flags;
 	unsigned i;
+	int vpos, hpos, stat, min_udelay;
+	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
 	amdgpu_flip_wait_fence(adev, &work->excl);
 	for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
+	/* If this happens to execute within the "virtually extended" vblank
+	 * interval before the start of the real vblank interval then it needs
+	 * to delay programming the mmio flip until the real vblank is entered.
+	 * This prevents completing a flip too early due to the way we fudge
+	 * our vblank counter and vblank timestamps in order to work around the
+	 * problem that the hw fires vblank interrupts before actual start of
+	 * vblank (when line buffer refilling is done for a frame). It
+	 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+	 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+	 *
+	 * In practice this won't execute very often unless on very fast
+	 * machines because the time window for this to happen is very small.
+	 */
+	for (;;) {
+		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+		 * start in hpos, and to the "fudged earlier" vblank start in
+		 * vpos.
+		 */
+		stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+						  GET_DISTANCE_TO_VBLANKSTART,
+						  &vpos, &hpos, NULL, NULL,
+						  &crtc->hwmode);
+
+		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+		    !(vpos >= 0 && hpos <= 0))
+			break;
+
+		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+		usleep_range(min_udelay, 2 * min_udelay);
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	};
+
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	amdgpu_bo_unref(&work->old_rbo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	drm_gem_object_reference(obj);
 	work->old_rbo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_rbo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
 	amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	amdgpu_bo_unref(&work->old_rbo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
@@ -481,7 +518,7 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 int
 amdgpu_framebuffer_init(struct drm_device *dev,
 			struct amdgpu_framebuffer *rfb,
-			struct drm_mode_fb_cmd2 *mode_cmd,
+			const struct drm_mode_fb_cmd2 *mode_cmd,
 			struct drm_gem_object *obj)
 {
 	int ret;
@@ -498,7 +535,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 amdgpu_user_framebuffer_create(struct drm_device *dev,
 			       struct drm_file *file_priv,
-			       struct drm_mode_fb_cmd2 *mode_cmd)
+			       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
 	struct amdgpu_framebuffer *amdgpu_fb;
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param pipe Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 		vbl_end = 0;
 	}
 
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from real vbl_start in *hpos */
+		*hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
 	/* Test scanout position against vblank region. */
 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
 		in_vbl = false;
 
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
 	/* Check if inside vblank area and apply corrective offsets:
 	 * vpos will then be >=0 in video scanout area, but negative
 	 * within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	/* Correct for shifted end of vbl at vbl_end. */
 	*vpos = *vpos - vbl_end;
 
-	/* In vblank? */
-	if (in_vbl)
-		ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-	/* Is vpos outside nominal vblank area, but less than
-	 * 1/100 of a frame height away from start of vblank?
-	 * If so, assume this isn't a massively delayed vblank
-	 * interrupt, but a vblank interrupt that fired a few
-	 * microseconds before true start of vblank. Compensate
-	 * by adding a full frame duration to the final timestamp.
-	 * Happens, e.g., on ATI R500, R600.
-	 *
-	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
-	 */
-	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-		vbl_start = mode->crtc_vdisplay;
-		vtotal = mode->crtc_vtotal;
-
-		if (vbl_start - *vpos < vtotal / 100) {
-			*vpos -= vtotal;
-
-			/* Signal this correction as "applied". */
-			ret |= 0x8;
-		}
-	}
-
 	return ret;
 }
 
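
Note: the flip-work loop above sleeps in scanline-sized steps until the scanout leaves the "virtually extended" vblank window. The delay computation is worth spelling out: with hpos <= 0 as the distance in scanlines to the true vblank start and linedur_ns the duration of one scanline, the minimum sleep is one line of margin at no less than 5 us per line. A sketch under those assumptions (names hypothetical, values taken from the loop in amdgpu_flip_work_func()):

    #include <linux/delay.h>
    #include <linux/kernel.h>

    static void foo_sleep_until_real_vblank(int hpos, int linedur_ns)
    {
    	int min_udelay;

    	/* (-hpos + 1) lines remain; clamp per-line cost to >= 5 us */
    	min_udelay = (-hpos + 1) * max(linedur_ns / 1000, 5);
    	usleep_range(min_udelay, 2 * min_udelay);
    }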
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 093a8c618931..6fcbbcc2e99e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -45,7 +45,6 @@
 struct amdgpu_fbdev {
 	struct drm_fb_helper helper;
 	struct amdgpu_framebuffer rfb;
-	struct list_head fbdev_list;
 	struct amdgpu_device *adev;
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 257d72205bb5..3671f9f220bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,6 +47,9 @@
  * that the the relevant GPU caches have been flushed.
  */
 
+static struct kmem_cache *amdgpu_fence_slab;
+static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+
 /**
  * amdgpu_fence_write - write a fence value
  *
@@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 }
 
 /**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
-	/*
-	 * Do not reset the timer here with mod_delayed_work,
-	 * this can livelock in an interaction with TTM delayed destroy.
-	 */
-	queue_delayed_work(system_power_efficient_wq,
-			   &ring->fence_drv.lockup_work,
-			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
-/**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
  * @ring: ring the fence is associated with
@@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	struct amdgpu_device *adev = ring->adev;
 
 	/* we are protected by the ring emission mutex */
-	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if ((*fence) == NULL) {
 		return -ENOMEM;
 	}
@@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
 			       AMDGPU_FENCE_FLAG_INT);
-	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
 	return 0;
 }
 
 /**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+	mod_timer(&ring->fence_drv.fallback_timer,
+		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
  * amdgpu_fence_activity - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
@@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
 	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
 
 	if (seq < last_emitted)
-		amdgpu_fence_schedule_check(ring);
+		amdgpu_fence_schedule_fallback(ring);
 
 	return wake;
 }
 
 /**
- * amdgpu_fence_check_lockup - check for hardware lockup
+ * amdgpu_fence_process - process a fence
  *
- * @work: delayed work item
+ * @adev: amdgpu_device pointer
+ * @ring: ring index the fence is associated with
  *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
  */
-static void amdgpu_fence_check_lockup(struct work_struct *work)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence_driver *fence_drv;
-	struct amdgpu_ring *ring;
-
-	fence_drv = container_of(work, struct amdgpu_fence_driver,
-				 lockup_work.work);
-	ring = fence_drv->ring;
-
 	if (amdgpu_fence_activity(ring))
 		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
- * amdgpu_fence_process - process a fence
+ * amdgpu_fence_fallback - fallback for hardware interrupts
  *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
+ * @work: delayed work item
  *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks for fence activity.
  */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+static void amdgpu_fence_fallback(unsigned long arg)
 {
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
+	struct amdgpu_ring *ring = (void *)arg;
+
+	amdgpu_fence_process(ring);
 }
 
 /**
@@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
 		return 0;
 
-	amdgpu_fence_schedule_check(ring);
+	amdgpu_fence_schedule_fallback(ring);
 	wait_event(ring->fence_drv.fence_queue, (
 		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
@@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	atomic64_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;
 
-	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
-			  amdgpu_fence_check_lockup);
-	ring->fence_drv.ring = ring;
+	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+		    (unsigned long)ring);
 
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
@@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
+	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
+		amdgpu_fence_slab = kmem_cache_create(
+			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!amdgpu_fence_slab)
+			return -ENOMEM;
+	}
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+		kmem_cache_destroy(amdgpu_fence_slab);
 	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 		r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		amd_sched_fini(&ring->sched);
+		del_timer_sync(&ring->fence_drv.fallback_timer);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
 	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
-	amdgpu_fence_schedule_check(ring);
+	if (!timer_pending(&ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(ring);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
 
+static void amdgpu_fence_release(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	kmem_cache_free(amdgpu_fence_slab, fence);
+}
+
 const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
 	.signaled = amdgpu_fence_is_signaled,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amdgpu_fence_release,
 };
 
 /*
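
Note: two independent patterns land in amdgpu_fence.c above. Fence objects move from kmalloc to a module-wide, reference-counted kmem_cache (created by the first device, destroyed by the last, freed through the new .release hook), and the delayed-work lockup check becomes a plain timer_list that merely acts as a fallback for missed interrupts. Minimal sketches of both, with hypothetical names and the pre-4.15 timer API this kernel uses:

    #include <linux/slab.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/atomic.h>

    struct foo_fence { unsigned seq; };

    static struct kmem_cache *foo_fence_slab;
    static atomic_t foo_fence_slab_ref = ATOMIC_INIT(0);

    static int foo_fence_slab_get(void)
    {
    	/* first caller creates the shared cache */
    	if (atomic_inc_return(&foo_fence_slab_ref) == 1) {
    		foo_fence_slab = kmem_cache_create("foo_fence",
    				sizeof(struct foo_fence), 0,
    				SLAB_HWCACHE_ALIGN, NULL);
    		if (!foo_fence_slab)
    			return -ENOMEM;
    	}
    	return 0;
    }

    static void foo_fence_slab_put(void)
    {
    	/* last caller tears the cache down */
    	if (atomic_dec_and_test(&foo_fence_slab_ref))
    		kmem_cache_destroy(foo_fence_slab);
    }

    static void foo_fallback_fn(unsigned long arg)
    {
    	/* check for completions the interrupt may have missed */
    }

    static DEFINE_TIMER(foo_fallback_timer, foo_fallback_fn, 0, 0);

    static void foo_schedule_fallback(void)
    {
    	/* only re-arm when not already pending, cf. enable_signaling */
    	if (!timer_pending(&foo_fallback_timer))
    		mod_timer(&foo_fallback_timer, jiffies + HZ / 2);
    }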
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 087332858853..f6ea4b43a60c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
-	if (r) {
-		mutex_unlock(&vm->mutex);
+	if (r)
 		return r;
-	}
 
 	bo_va = amdgpu_vm_bo_find(vm, rbo);
 	if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
135 131
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
-		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
163static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) 156static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			      AMDGPU_GEM_USERPTR_REGISTER))
 		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
 
 		/* if we want to write to it we must require anonymous
 		   memory and install a MMU notifier */
@@ -483,6 +477,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
@@ -512,6 +509,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *rbo;
 	struct amdgpu_bo_va *bo_va;
+	struct ttm_validate_buffer tv, tv_pd;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -547,19 +547,28 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
-	r = amdgpu_bo_reserve(rbo, false);
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	tv.bo = &rbo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	if (args->operation == AMDGPU_VA_OP_MAP) {
+		tv_pd.bo = &fpriv->vm.page_directory->tbo;
+		tv_pd.shared = true;
+		list_add(&tv_pd.head, &list);
+	}
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
-		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		amdgpu_bo_unreserve(rbo);
-		mutex_unlock(&fpriv->vm.mutex);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		drm_gem_object_unreference_unlocked(gobj);
 		return -ENOENT;
 	}
 
@@ -581,10 +590,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-
+	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-	mutex_unlock(&fpriv->vm.mutex);
+
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
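
Note: the amdgpu_gem.c changes drop the global VM mutex around VA updates and instead reserve exactly the buffers involved through a ttm execbuf-util list: the BO itself, plus the page directory when mapping. The reservation idiom, sketched with hypothetical placeholders for the two buffers:

    #include <linux/list.h>
    #include <drm/ttm/ttm_execbuf_util.h>

    static int foo_reserve_bo_and_pd(struct ttm_buffer_object *bo,
    				 struct ttm_buffer_object *pd)
    {
    	struct ttm_validate_buffer tv, tv_pd;
    	struct ww_acquire_ctx ticket;
    	struct list_head list, duplicates;
    	int r;

    	INIT_LIST_HEAD(&list);
    	INIT_LIST_HEAD(&duplicates);

    	tv.bo = bo;
    	tv.shared = true;
    	list_add(&tv.head, &list);

    	tv_pd.bo = pd;
    	tv_pd.shared = true;
    	list_add(&tv_pd.head, &list);

    	/* deadlock-free multi-BO locking via the ww_mutex ticket */
    	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
    	if (r)
    		return r;

    	/* ... update the mappings while everything is reserved ... */

    	ttm_eu_backoff_reservation(&ticket, &list);
    	return 0;
    }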
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e65987743871..9e25edafa721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
 	int r;
 
 	if (size) {
-		r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
 				     &ib->sa_bo, size, 256);
 		if (r) {
 			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 	}
 
 	if (ib->vm)
-		amdgpu_vm_fence(adev, ib->vm, ib->fence);
+		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
 
 	amdgpu_ring_unlock_commit(ring);
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1618e2294a16..e23843f4d877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	int vpos, hpos, stat;
+	u32 count;
 
 	if (pipe >= adev->mode_info.num_crtc) {
 		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
-	return amdgpu_display_vblank_get_counter(adev, pipe);
+	/* The hw increments its frame counter at start of vsync, not at start
+	 * of vblank, as is required by DRM core vblank counter handling.
+	 * Cook the hw count here to make it appear to the caller as if it
+	 * incremented at start of vblank. We measure distance to start of
+	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+	 * result by 1 to give the proper appearance to caller.
+	 */
+	if (adev->mode_info.crtcs[pipe]) {
+		/* Repeat readout if needed to provide stable result if
+		 * we cross start of vsync during the queries.
+		 */
+		do {
+			count = amdgpu_display_vblank_get_counter(adev, pipe);
+			/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+			 * distance to start of vblank, instead of regular
+			 * vertical scanout pos.
+			 */
+			stat = amdgpu_get_crtc_scanoutpos(
+				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+				&vpos, &hpos, NULL, NULL,
+				&adev->mode_info.crtcs[pipe]->base.hwmode);
+		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+
+		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+		} else {
+			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+				      pipe, vpos);
+
+			/* Bump counter if we are at >= leading edge of vblank,
+			 * but before vsync where vpos would turn negative and
+			 * the hw counter really increments.
+			 */
+			if (vpos >= 0)
+				count++;
+		}
+	} else {
+		/* Fallback to use value as is. */
+		count = amdgpu_display_vblank_get_counter(adev, pipe);
+		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+	}
+
+	return count;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b62c1710cab6..fdc1be8550da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -35,6 +35,7 @@
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_fixed.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -407,6 +408,7 @@ struct amdgpu_crtc {
 	u32 line_time;
 	u32 wm_low;
 	u32 wm_high;
+	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
 };
 
@@ -528,6 +530,10 @@ struct amdgpu_framebuffer {
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
 				((em) == ATOM_ENCODER_MODE_DP_MST))
 
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART		(1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)
+
 void amdgpu_link_encoder_connector(struct drm_device *dev);
 
 struct drm_connector *
@@ -551,7 +557,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
551 557
552int amdgpu_framebuffer_init(struct drm_device *dev, 558int amdgpu_framebuffer_init(struct drm_device *dev,
553 struct amdgpu_framebuffer *rfb, 559 struct amdgpu_framebuffer *rfb,
554 struct drm_mode_fb_cmd2 *mode_cmd, 560 const struct drm_mode_fb_cmd2 *mode_cmd,
555 struct drm_gem_object *obj); 561 struct drm_gem_object *obj);
556 562
557int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb); 563int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
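
USE_REAL_VBLANKSTART and GET_DISTANCE_TO_VBLANKSTART occupy the top two bits precisely so they stay clear of the flag values the DRM core itself passes into the scanout-position query. A sketch of the intended usage, mirroring the caller in the first hunk of this patch (error handling is illustrative only):

    /* Ask the scanout query for distance-to-vblank semantics. */
    stat = amdgpu_get_crtc_scanoutpos(dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
                                      &vpos, &hpos, NULL, NULL, hwmode);
    if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))
        return -EIO;    /* query result unusable */
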
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0d524384ff79..c3ce103b6a33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
100 list_del_init(&bo->list); 100 list_del_init(&bo->list);
101 mutex_unlock(&bo->adev->gem.mutex); 101 mutex_unlock(&bo->adev->gem.mutex);
102 drm_gem_object_release(&bo->gem_base); 102 drm_gem_object_release(&bo->gem_base);
103 amdgpu_bo_unref(&bo->parent);
103 kfree(bo->metadata); 104 kfree(bo->metadata);
104 kfree(bo); 105 kfree(bo);
105} 106}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3c2ff4567798..ea756e77b023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
189 struct amdgpu_sa_manager *sa_manager); 189 struct amdgpu_sa_manager *sa_manager);
190int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, 190int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
191 struct amdgpu_sa_manager *sa_manager); 191 struct amdgpu_sa_manager *sa_manager);
192int amdgpu_sa_bo_new(struct amdgpu_device *adev, 192int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
193 struct amdgpu_sa_manager *sa_manager, 193 struct amdgpu_sa_bo **sa_bo,
194 struct amdgpu_sa_bo **sa_bo, 194 unsigned size, unsigned align);
195 unsigned size, unsigned align);
196void amdgpu_sa_bo_free(struct amdgpu_device *adev, 195void amdgpu_sa_bo_free(struct amdgpu_device *adev,
197 struct amdgpu_sa_bo **sa_bo, 196 struct amdgpu_sa_bo **sa_bo,
198 struct fence *fence); 197 struct fence *fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 0212b31dc194..8b88edb0434b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
311 return false; 311 return false;
312} 312}
313 313
314int amdgpu_sa_bo_new(struct amdgpu_device *adev, 314int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
315 struct amdgpu_sa_manager *sa_manager,
316 struct amdgpu_sa_bo **sa_bo, 315 struct amdgpu_sa_bo **sa_bo,
317 unsigned size, unsigned align) 316 unsigned size, unsigned align)
318{ 317{
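
The sub-allocator no longer needs the device pointer; the manager alone identifies the pool. The matching caller update appears in amdgpu_semaphore.c below:

    /* Old: amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, ...) */
    r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
                         &(*semaphore)->sa_bo, 8, 8);
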
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8aca680..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "amdgpu.h" 28#include "amdgpu.h"
29#include "amdgpu_trace.h"
29 30
30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) 31static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
31{ 32{
@@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
44 return NULL; 45 return NULL;
45 } 46 }
46 job = to_amdgpu_job(sched_job); 47 job = to_amdgpu_job(sched_job);
47 mutex_lock(&job->job_lock); 48 trace_amdgpu_sched_run_job(job);
48 r = amdgpu_ib_schedule(job->adev, 49 r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
49 job->num_ibs,
50 job->ibs,
51 job->base.owner);
52 if (r) { 50 if (r) {
53 DRM_ERROR("Error scheduling IBs (%d)\n", r); 51 DRM_ERROR("Error scheduling IBs (%d)\n", r);
54 goto err; 52 goto err;
@@ -61,8 +59,6 @@ err:
61 if (job->free_job) 59 if (job->free_job)
62 job->free_job(job); 60 job->free_job(job);
63 61
64 mutex_unlock(&job->job_lock);
65 fence_put(&job->base.s_fence->base);
66 kfree(job); 62 kfree(job);
67 return fence ? &fence->base : NULL; 63 return fence ? &fence->base : NULL;
68} 64}
@@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
88 return -ENOMEM; 84 return -ENOMEM;
89 job->base.sched = &ring->sched; 85 job->base.sched = &ring->sched;
90 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 86 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
87 job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
88 if (!job->base.s_fence) {
89 kfree(job);
90 return -ENOMEM;
91 }
92 *f = fence_get(&job->base.s_fence->base);
93
91 job->adev = adev; 94 job->adev = adev;
92 job->ibs = ibs; 95 job->ibs = ibs;
93 job->num_ibs = num_ibs; 96 job->num_ibs = num_ibs;
94 job->base.owner = owner; 97 job->owner = owner;
95 mutex_init(&job->job_lock);
96 job->free_job = free_job; 98 job->free_job = free_job;
97 mutex_lock(&job->job_lock); 99 amd_sched_entity_push_job(&job->base);
98 r = amd_sched_entity_push_job(&job->base);
99 if (r) {
100 mutex_unlock(&job->job_lock);
101 kfree(job);
102 return r;
103 }
104 *f = fence_get(&job->base.s_fence->base);
105 mutex_unlock(&job->job_lock);
106 } else { 100 } else {
107 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); 101 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
108 if (r) 102 if (r)
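
The reordering above is the point of this hunk: the scheduler fence is created, and the caller's reference taken, before the job is pushed, so job_lock and its error unwinding can be deleted. Sketched from the new code:

    job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
    if (!job->base.s_fence) {
        kfree(job);
        return -ENOMEM;
    }
    *f = fence_get(&job->base.s_fence->base);   /* caller's reference, taken early */
    /* ... fill in adev, ibs, num_ibs, owner, free_job ... */
    amd_sched_entity_push_job(&job->base);      /* push no longer has a failure path */

Because the fence exists before the push, there is no window in which the scheduler could run and free the job while the caller still needs to take its reference.
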
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index ff3ca52ec6fe..1caaf201b708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
40 if (*semaphore == NULL) { 40 if (*semaphore == NULL) {
41 return -ENOMEM; 41 return -ENOMEM;
42 } 42 }
43 r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 43 r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
44 &(*semaphore)->sa_bo, 8, 8); 44 &(*semaphore)->sa_bo, 8, 8);
45 if (r) { 45 if (r) {
46 kfree(*semaphore); 46 kfree(*semaphore);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6697fd05217..dd005c336c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
302 return -EINVAL; 302 return -EINVAL;
303 } 303 }
304 304
305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || 305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
306 (count >= AMDGPU_NUM_SYNCS)) { 306 r = fence_wait(&fence->base, true);
307 if (r)
308 return r;
309 continue;
310 }
311
312 if (count >= AMDGPU_NUM_SYNCS) {
307 /* not enough room, wait manually */ 313 /* not enough room, wait manually */
308 r = fence_wait(&fence->base, false); 314 r = fence_wait(&fence->base, false);
309 if (r) 315 if (r)
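
Note the wait modes in the split: the new scheduler/no-semaphores branch waits interruptibly (second argument true), while the not-enough-room fallback keeps the uninterruptible wait (false). For reference:

    r = fence_wait(&fence->base, true);    /* interruptible: may return -ERESTARTSYS */
    if (r)
        return r;

    r = fence_wait(&fence->base, false);   /* uninterruptible: blocks until signaled */
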
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76ecbaf72a2e..8f9834ab1bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
48 __entry->fences) 48 __entry->fences)
49); 49);
50 50
51TRACE_EVENT(amdgpu_cs_ioctl,
52 TP_PROTO(struct amdgpu_job *job),
53 TP_ARGS(job),
54 TP_STRUCT__entry(
55 __field(struct amdgpu_device *, adev)
56 __field(struct amd_sched_job *, sched_job)
57 __field(struct amdgpu_ib *, ib)
58 __field(struct fence *, fence)
59 __field(char *, ring_name)
60 __field(u32, num_ibs)
61 ),
62
63 TP_fast_assign(
64 __entry->adev = job->adev;
65 __entry->sched_job = &job->base;
66 __entry->ib = job->ibs;
67 __entry->fence = &job->base.s_fence->base;
68 __entry->ring_name = job->ibs[0].ring->name;
69 __entry->num_ibs = job->num_ibs;
70 ),
71 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
72 __entry->adev, __entry->sched_job, __entry->ib,
73 __entry->fence, __entry->ring_name, __entry->num_ibs)
74);
75
76TRACE_EVENT(amdgpu_sched_run_job,
77 TP_PROTO(struct amdgpu_job *job),
78 TP_ARGS(job),
79 TP_STRUCT__entry(
80 __field(struct amdgpu_device *, adev)
81 __field(struct amd_sched_job *, sched_job)
82 __field(struct amdgpu_ib *, ib)
83 __field(struct fence *, fence)
84 __field(char *, ring_name)
85 __field(u32, num_ibs)
86 ),
87
88 TP_fast_assign(
89 __entry->adev = job->adev;
90 __entry->sched_job = &job->base;
91 __entry->ib = job->ibs;
92 __entry->fence = &job->base.s_fence->base;
93 __entry->ring_name = job->ibs[0].ring->name;
94 __entry->num_ibs = job->num_ibs;
95 ),
96 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
97 __entry->adev, __entry->sched_job, __entry->ib,
98 __entry->fence, __entry->ring_name, __entry->num_ibs)
99);
100
101
51TRACE_EVENT(amdgpu_vm_grab_id, 102TRACE_EVENT(amdgpu_vm_grab_id,
52 TP_PROTO(unsigned vmid, int ring), 103 TP_PROTO(unsigned vmid, int ring),
53 TP_ARGS(vmid, ring), 104 TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
196 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) 247 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
197); 248);
198 249
199DECLARE_EVENT_CLASS(amdgpu_fence_request,
200
201 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
202
203 TP_ARGS(dev, ring, seqno),
204
205 TP_STRUCT__entry(
206 __field(u32, dev)
207 __field(int, ring)
208 __field(u32, seqno)
209 ),
210
211 TP_fast_assign(
212 __entry->dev = dev->primary->index;
213 __entry->ring = ring;
214 __entry->seqno = seqno;
215 ),
216
217 TP_printk("dev=%u, ring=%d, seqno=%u",
218 __entry->dev, __entry->ring, __entry->seqno)
219);
220
221DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
222
223 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
224
225 TP_ARGS(dev, ring, seqno)
226);
227
228DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
229
230 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
231
232 TP_ARGS(dev, ring, seqno)
233);
234
235DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
236
237 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
238
239 TP_ARGS(dev, ring, seqno)
240);
241
242DECLARE_EVENT_CLASS(amdgpu_semaphore_request, 250DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
243 251
244 TP_PROTO(int ring, struct amdgpu_semaphore *sem), 252 TP_PROTO(int ring, struct amdgpu_semaphore *sem),
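
The two new tracepoints, amdgpu_cs_ioctl and amdgpu_sched_run_job, share an identical field layout; a DECLARE_EVENT_CLASS/DEFINE_EVENT pair (as used for amdgpu_semaphore_request just above) could express that without the duplication. The run-job event is emitted at the call added in amdgpu_sched.c earlier in this patch:

    trace_amdgpu_sched_run_job(job);    /* records adev, sched fence, ring name, IB count */
    r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
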
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81bb8e9fc26d..8a1752ff3d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
587 uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); 587 uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
588 int r; 588 int r;
589 589
590 if (gtt->userptr) 590 if (gtt->userptr) {
591 amdgpu_ttm_tt_pin_userptr(ttm); 591 r = amdgpu_ttm_tt_pin_userptr(ttm);
592 592 if (r) {
593 DRM_ERROR("failed to pin userptr\n");
594 return r;
595 }
596 }
593 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); 597 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
594 if (!ttm->num_pages) { 598 if (!ttm->num_pages) {
595 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", 599 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
797 if (mem && mem->mem_type != TTM_PL_SYSTEM) 801 if (mem && mem->mem_type != TTM_PL_SYSTEM)
798 flags |= AMDGPU_PTE_VALID; 802 flags |= AMDGPU_PTE_VALID;
799 803
800 if (mem && mem->mem_type == TTM_PL_TT) 804 if (mem && mem->mem_type == TTM_PL_TT) {
801 flags |= AMDGPU_PTE_SYSTEM; 805 flags |= AMDGPU_PTE_SYSTEM;
802 806
803 if (!ttm || ttm->caching_state == tt_cached) 807 if (ttm->caching_state == tt_cached)
804 flags |= AMDGPU_PTE_SNOOPED; 808 flags |= AMDGPU_PTE_SNOOPED;
809 }
805 810
806 if (adev->asic_type >= CHIP_TOPAZ) 811 if (adev->asic_type >= CHIP_TOPAZ)
807 flags |= AMDGPU_PTE_EXECUTABLE; 812 flags |= AMDGPU_PTE_EXECUTABLE;
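
Nesting the snoop test under the TTM_PL_TT check fixes the old `!ttm || ttm->caching_state == tt_cached` condition, which could set SNOOPED for placements that are not in GTT at all. The rewritten derivation as a decision table (sketch):

    /* placement                   caching      -> PTE bits added
     * mem_type != TTM_PL_SYSTEM   any             AMDGPU_PTE_VALID
     * mem_type == TTM_PL_TT       any             AMDGPU_PTE_SYSTEM
     * mem_type == TTM_PL_TT       tt_cached       AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED
     */
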
@@ -1073,10 +1078,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1073 ret = drm_mm_dump_table(m, mm); 1078 ret = drm_mm_dump_table(m, mm);
1074 spin_unlock(&glob->lru_lock); 1079 spin_unlock(&glob->lru_lock);
1075 if (ttm_pl == TTM_PL_VRAM) 1080 if (ttm_pl == TTM_PL_VRAM)
1076 seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", 1081 seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
1077 adev->mman.bdev.man[ttm_pl].size, 1082 adev->mman.bdev.man[ttm_pl].size,
1078 atomic64_read(&adev->vram_usage) >> 20, 1083 (u64)atomic64_read(&adev->vram_usage) >> 20,
1079 atomic64_read(&adev->vram_vis_usage) >> 20); 1084 (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
1080 return ret; 1085 return ret;
1081} 1086}
1082 1087
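
The seq_printf() fix pairs each 64-bit value with %llu: atomic64_read() returns a signed 64-bit quantity whose C type differs between 32-bit and 64-bit architectures, so the explicit (u64) cast plus %llu is the portable combination:

    /* Portable printing of an atomic64 counter (sketch). */
    seq_printf(m, "ram usage:%lluMB\n",
               (u64)atomic64_read(&adev->vram_usage) >> 20);
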
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 03f0c3bae516..a745eeeb5d82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
392 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ 392 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
393 ib->ptr[ib->length_dw++] = handle; 393 ib->ptr[ib->length_dw++] = handle;
394 394
395 ib->ptr[ib->length_dw++] = 0x00000030; /* len */ 395 if ((ring->adev->vce.fw_version >> 24) >= 52)
396 ib->ptr[ib->length_dw++] = 0x00000040; /* len */
397 else
398 ib->ptr[ib->length_dw++] = 0x00000030; /* len */
396 ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ 399 ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
397 ib->ptr[ib->length_dw++] = 0x00000000; 400 ib->ptr[ib->length_dw++] = 0x00000000;
398 ib->ptr[ib->length_dw++] = 0x00000042; 401 ib->ptr[ib->length_dw++] = 0x00000042;
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
404 ib->ptr[ib->length_dw++] = 0x00000100; 407 ib->ptr[ib->length_dw++] = 0x00000100;
405 ib->ptr[ib->length_dw++] = 0x0000000c; 408 ib->ptr[ib->length_dw++] = 0x0000000c;
406 ib->ptr[ib->length_dw++] = 0x00000000; 409 ib->ptr[ib->length_dw++] = 0x00000000;
410 if ((ring->adev->vce.fw_version >> 24) >= 52) {
411 ib->ptr[ib->length_dw++] = 0x00000000;
412 ib->ptr[ib->length_dw++] = 0x00000000;
413 ib->ptr[ib->length_dw++] = 0x00000000;
414 ib->ptr[ib->length_dw++] = 0x00000000;
415 }
407 416
408 ib->ptr[ib->length_dw++] = 0x00000014; /* len */ 417 ib->ptr[ib->length_dw++] = 0x00000014; /* len */
409 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ 418 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
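
For VCE firmware version 52 and newer the create message gains four zero dwords, and the session command length grows to match: 0x40 - 0x30 = 16 bytes, exactly four 32-bit words. An illustrative compile-time check of that bookkeeping (not part of the patch):

    BUILD_BUG_ON(0x40 - 0x30 != 4 * sizeof(u32));
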
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a48560..b53d273eb7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
143 unsigned i; 143 unsigned i;
144 144
145 /* check if the id is still valid */ 145 /* check if the id is still valid */
146 if (vm_id->id && vm_id->last_id_use && 146 if (vm_id->id) {
147 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { 147 unsigned id = vm_id->id;
148 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 148 long owner;
149 return 0; 149
150 owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
151 if (owner == (long)vm) {
152 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
153 return 0;
154 }
150 } 155 }
151 156
152 /* we definitely need to flush */ 157 /* we definitely need to flush */
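
The VMID-reuse test now compares an owner token, stored as an atomic long in the manager's per-ID state, against the VM pointer, instead of keeping a fence alive for the lifetime check. The core of the new test:

    owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
    if (owner == (long)vm) {
        /* ID still belongs to this VM: reuse it, no flush needed. */
        trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
        return 0;
    }
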
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
154 159
155 /* skip over VMID 0, since it is the system VM */ 160 /* skip over VMID 0, since it is the system VM */
156 for (i = 1; i < adev->vm_manager.nvm; ++i) { 161 for (i = 1; i < adev->vm_manager.nvm; ++i) {
157 struct fence *fence = adev->vm_manager.active[i]; 162 struct fence *fence = adev->vm_manager.ids[i].active;
158 struct amdgpu_ring *fring; 163 struct amdgpu_ring *fring;
159 164
160 if (fence == NULL) { 165 if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
176 if (choices[i]) { 181 if (choices[i]) {
177 struct fence *fence; 182 struct fence *fence;
178 183
179 fence = adev->vm_manager.active[choices[i]]; 184 fence = adev->vm_manager.ids[choices[i]].active;
180 vm_id->id = choices[i]; 185 vm_id->id = choices[i];
181 186
182 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 187 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
207 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); 212 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
208 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 213 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
209 struct fence *flushed_updates = vm_id->flushed_updates; 214 struct fence *flushed_updates = vm_id->flushed_updates;
210 bool is_earlier = false; 215 bool is_later;
211 216
212 if (flushed_updates && updates) { 217 if (!flushed_updates)
213 BUG_ON(flushed_updates->context != updates->context); 218 is_later = true;
214 is_earlier = (updates->seqno - flushed_updates->seqno <= 219 else if (!updates)
215 INT_MAX) ? true : false; 220 is_later = false;
216 } 221 else
217 222 is_later = fence_is_later(updates, flushed_updates);
218 if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
219 is_earlier) {
220 223
224 if (pd_addr != vm_id->pd_gpu_addr || is_later) {
221 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); 225 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
222 if (is_earlier) { 226 if (is_later) {
223 vm_id->flushed_updates = fence_get(updates); 227 vm_id->flushed_updates = fence_get(updates);
224 fence_put(flushed_updates); 228 fence_put(flushed_updates);
225 } 229 }
226 if (!flushed_updates)
227 vm_id->flushed_updates = fence_get(updates);
228 vm_id->pd_gpu_addr = pd_addr; 230 vm_id->pd_gpu_addr = pd_addr;
229 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); 231 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
230 } 232 }
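
fence_is_later() encapsulates both the context sanity check (the removed BUG_ON) and the wraparound-safe sequence-number comparison that the old `seqno - seqno <= INT_MAX` arithmetic open-coded. The three-way decision the hunk builds on top of it:

    if (!flushed_updates)
        is_later = true;    /* never flushed: any updates are news */
    else if (!updates)
        is_later = false;   /* nothing new to consider */
    else
        is_later = fence_is_later(updates, flushed_updates);
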
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
244 */ 246 */
245void amdgpu_vm_fence(struct amdgpu_device *adev, 247void amdgpu_vm_fence(struct amdgpu_device *adev,
246 struct amdgpu_vm *vm, 248 struct amdgpu_vm *vm,
247 struct amdgpu_fence *fence) 249 struct fence *fence)
248{ 250{
249 unsigned ridx = fence->ring->idx; 251 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
250 unsigned vm_id = vm->ids[ridx].id; 252 unsigned vm_id = vm->ids[ring->idx].id;
251
252 fence_put(adev->vm_manager.active[vm_id]);
253 adev->vm_manager.active[vm_id] = fence_get(&fence->base);
254 253
255 fence_put(vm->ids[ridx].last_id_use); 254 fence_put(adev->vm_manager.ids[vm_id].active);
256 vm->ids[ridx].last_id_use = fence_get(&fence->base); 255 adev->vm_manager.ids[vm_id].active = fence_get(fence);
256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
257} 257}
258 258
259/** 259/**
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
332 * 332 *
333 * @adev: amdgpu_device pointer 333 * @adev: amdgpu_device pointer
334 * @bo: bo to clear 334 * @bo: bo to clear
335 *
336 * The caller must reserve the BO before calling this function.
335 */ 337 */
336static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 338static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
337 struct amdgpu_bo *bo) 339 struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
343 uint64_t addr; 345 uint64_t addr;
344 int r; 346 int r;
345 347
346 r = amdgpu_bo_reserve(bo, false);
347 if (r)
348 return r;
349
350 r = reservation_object_reserve_shared(bo->tbo.resv); 348 r = reservation_object_reserve_shared(bo->tbo.resv);
351 if (r) 349 if (r)
352 return r; 350 return r;
353 351
354 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 352 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
355 if (r) 353 if (r)
356 goto error_unreserve; 354 goto error;
357 355
358 addr = amdgpu_bo_gpu_offset(bo); 356 addr = amdgpu_bo_gpu_offset(bo);
359 entries = amdgpu_bo_size(bo) / 8; 357 entries = amdgpu_bo_size(bo) / 8;
360 358
361 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 359 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
362 if (!ib) 360 if (!ib)
363 goto error_unreserve; 361 goto error;
364 362
365 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); 363 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
366 if (r) 364 if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
378 if (!r) 376 if (!r)
379 amdgpu_bo_fence(bo, fence, true); 377 amdgpu_bo_fence(bo, fence, true);
380 fence_put(fence); 378 fence_put(fence);
381 if (amdgpu_enable_scheduler) { 379 if (amdgpu_enable_scheduler)
382 amdgpu_bo_unreserve(bo);
383 return 0; 380 return 0;
384 } 381
385error_free: 382error_free:
386 amdgpu_ib_free(adev, ib); 383 amdgpu_ib_free(adev, ib);
387 kfree(ib); 384 kfree(ib);
388 385
389error_unreserve: 386error:
390 amdgpu_bo_unreserve(bo);
391 return r; 387 return r;
392} 388}
393 389
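
Reservation responsibility moves to the callers of amdgpu_vm_clear_bo(), as the new kerneldoc line notes. The resulting caller pattern, mirrored by the amdgpu_vm_init() hunk later in this patch:

    r = amdgpu_bo_reserve(bo, false);
    if (r)
        return r;
    r = amdgpu_vm_clear_bo(adev, bo);   /* BO is reserved across the call */
    amdgpu_bo_unreserve(bo);
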
@@ -889,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
889 struct amdgpu_bo_va_mapping *mapping; 885 struct amdgpu_bo_va_mapping *mapping;
890 int r; 886 int r;
891 887
888 spin_lock(&vm->freed_lock);
892 while (!list_empty(&vm->freed)) { 889 while (!list_empty(&vm->freed)) {
893 mapping = list_first_entry(&vm->freed, 890 mapping = list_first_entry(&vm->freed,
894 struct amdgpu_bo_va_mapping, list); 891 struct amdgpu_bo_va_mapping, list);
895 list_del(&mapping->list); 892 list_del(&mapping->list);
896 893 spin_unlock(&vm->freed_lock);
897 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); 894 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
898 kfree(mapping); 895 kfree(mapping);
899 if (r) 896 if (r)
900 return r; 897 return r;
901 898
899 spin_lock(&vm->freed_lock);
902 } 900 }
901 spin_unlock(&vm->freed_lock);
902
903 return 0; 903 return 0;
904 904
905} 905}
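
The freed list is drained with a drop-the-lock-while-processing loop: one entry is popped under freed_lock, the lock is released for the mapping update (which may sleep), then retaken before the list is tested again. The shape of the pattern:

    struct amdgpu_bo_va_mapping *mapping;

    spin_lock(&vm->freed_lock);
    while (!list_empty(&vm->freed)) {
        mapping = list_first_entry(&vm->freed,
                                   struct amdgpu_bo_va_mapping, list);
        list_del(&mapping->list);
        spin_unlock(&vm->freed_lock);   /* update may sleep */
        /* ... amdgpu_vm_bo_update_mapping(), kfree() ... */
        spin_lock(&vm->freed_lock);
    }
    spin_unlock(&vm->freed_lock);
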
@@ -926,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
926 bo_va = list_first_entry(&vm->invalidated, 926 bo_va = list_first_entry(&vm->invalidated,
927 struct amdgpu_bo_va, vm_status); 927 struct amdgpu_bo_va, vm_status);
928 spin_unlock(&vm->status_lock); 928 spin_unlock(&vm->status_lock);
929 929 mutex_lock(&bo_va->mutex);
930 r = amdgpu_vm_bo_update(adev, bo_va, NULL); 930 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
931 mutex_unlock(&bo_va->mutex);
931 if (r) 932 if (r)
932 return r; 933 return r;
933 934
@@ -971,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
971 INIT_LIST_HEAD(&bo_va->valids); 972 INIT_LIST_HEAD(&bo_va->valids);
972 INIT_LIST_HEAD(&bo_va->invalids); 973 INIT_LIST_HEAD(&bo_va->invalids);
973 INIT_LIST_HEAD(&bo_va->vm_status); 974 INIT_LIST_HEAD(&bo_va->vm_status);
974 975 mutex_init(&bo_va->mutex);
975 list_add_tail(&bo_va->bo_list, &bo->va); 976 list_add_tail(&bo_va->bo_list, &bo->va);
976 977
977 return bo_va; 978 return bo_va;
@@ -989,7 +990,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
989 * Add a mapping of the BO at the specified addr into the VM. 990 * Add a mapping of the BO at the specified addr into the VM.
990 * Returns 0 for success, error for failure. 991 * Returns 0 for success, error for failure.
991 * 992 *
992 * Object has to be reserved and gets unreserved by this function! 993 * The object must be reserved and unreserved by the caller.
993 */ 994 */
994int amdgpu_vm_bo_map(struct amdgpu_device *adev, 995int amdgpu_vm_bo_map(struct amdgpu_device *adev,
995 struct amdgpu_bo_va *bo_va, 996 struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1006,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1005 1006
1006 /* validate the parameters */ 1007 /* validate the parameters */
1007 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 1008 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1008 size == 0 || size & AMDGPU_GPU_PAGE_MASK) { 1009 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1009 amdgpu_bo_unreserve(bo_va->bo);
1010 return -EINVAL; 1010 return -EINVAL;
1011 }
1012 1011
1013 /* make sure object fit at this offset */ 1012 /* make sure object fit at this offset */
1014 eaddr = saddr + size; 1013 eaddr = saddr + size;
1015 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { 1014 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1016 amdgpu_bo_unreserve(bo_va->bo);
1017 return -EINVAL; 1015 return -EINVAL;
1018 }
1019 1016
1020 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 1017 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1021 if (last_pfn > adev->vm_manager.max_pfn) { 1018 if (last_pfn > adev->vm_manager.max_pfn) {
1022 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", 1019 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
1023 last_pfn, adev->vm_manager.max_pfn); 1020 last_pfn, adev->vm_manager.max_pfn);
1024 amdgpu_bo_unreserve(bo_va->bo);
1025 return -EINVAL; 1021 return -EINVAL;
1026 } 1022 }
1027 1023
1028 saddr /= AMDGPU_GPU_PAGE_SIZE; 1024 saddr /= AMDGPU_GPU_PAGE_SIZE;
1029 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1025 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1030 1026
1027 spin_lock(&vm->it_lock);
1031 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1028 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
1029 spin_unlock(&vm->it_lock);
1032 if (it) { 1030 if (it) {
1033 struct amdgpu_bo_va_mapping *tmp; 1031 struct amdgpu_bo_va_mapping *tmp;
1034 tmp = container_of(it, struct amdgpu_bo_va_mapping, it); 1032 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1034,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1036 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 1034 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1037 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, 1035 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1038 tmp->it.start, tmp->it.last + 1); 1036 tmp->it.start, tmp->it.last + 1);
1039 amdgpu_bo_unreserve(bo_va->bo);
1040 r = -EINVAL; 1037 r = -EINVAL;
1041 goto error; 1038 goto error;
1042 } 1039 }
1043 1040
1044 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 1041 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1045 if (!mapping) { 1042 if (!mapping) {
1046 amdgpu_bo_unreserve(bo_va->bo);
1047 r = -ENOMEM; 1043 r = -ENOMEM;
1048 goto error; 1044 goto error;
1049 } 1045 }
@@ -1054,8 +1050,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1054 mapping->offset = offset; 1050 mapping->offset = offset;
1055 mapping->flags = flags; 1051 mapping->flags = flags;
1056 1052
1053 mutex_lock(&bo_va->mutex);
1057 list_add(&mapping->list, &bo_va->invalids); 1054 list_add(&mapping->list, &bo_va->invalids);
1055 mutex_unlock(&bo_va->mutex);
1056 spin_lock(&vm->it_lock);
1058 interval_tree_insert(&mapping->it, &vm->va); 1057 interval_tree_insert(&mapping->it, &vm->va);
1058 spin_unlock(&vm->it_lock);
1059 trace_amdgpu_vm_bo_map(bo_va, mapping); 1059 trace_amdgpu_vm_bo_map(bo_va, mapping);
1060 1060
1061 /* Make sure the page tables are allocated */ 1061 /* Make sure the page tables are allocated */
@@ -1067,8 +1067,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1067 if (eaddr > vm->max_pde_used) 1067 if (eaddr > vm->max_pde_used)
1068 vm->max_pde_used = eaddr; 1068 vm->max_pde_used = eaddr;
1069 1069
1070 amdgpu_bo_unreserve(bo_va->bo);
1071
1072 /* walk over the address space and allocate the page tables */ 1070 /* walk over the address space and allocate the page tables */
1073 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1071 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1074 struct reservation_object *resv = vm->page_directory->tbo.resv; 1072 struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,16 +1075,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1077 if (vm->page_tables[pt_idx].bo) 1075 if (vm->page_tables[pt_idx].bo)
1078 continue; 1076 continue;
1079 1077
1080 ww_mutex_lock(&resv->lock, NULL);
1081 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1078 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1082 AMDGPU_GPU_PAGE_SIZE, true, 1079 AMDGPU_GPU_PAGE_SIZE, true,
1083 AMDGPU_GEM_DOMAIN_VRAM, 1080 AMDGPU_GEM_DOMAIN_VRAM,
1084 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1081 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1085 NULL, resv, &pt); 1082 NULL, resv, &pt);
1086 ww_mutex_unlock(&resv->lock);
1087 if (r) 1083 if (r)
1088 goto error_free; 1084 goto error_free;
1089 1085
1086 /* Keep a reference from each page table to the page
1087 * directory so they cannot be freed in the wrong order.
1088 */
1089 pt->parent = amdgpu_bo_ref(vm->page_directory);
1090
1090 r = amdgpu_vm_clear_bo(adev, pt); 1091 r = amdgpu_vm_clear_bo(adev, pt);
1091 if (r) { 1092 if (r) {
1092 amdgpu_bo_unref(&pt); 1093 amdgpu_bo_unref(&pt);
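
The new parent reference pairs with the amdgpu_bo_unref(&bo->parent) added to amdgpu_ttm_bo_destroy() earlier in this patch: each page table pins the page directory, and the destroy path drops that pin last, so the directory can never be freed before its tables. In short:

    pt->parent = amdgpu_bo_ref(vm->page_directory);   /* taken when the table is created */
    /* ... later, on teardown: */
    amdgpu_bo_unref(&bo->parent);                     /* dropped in amdgpu_ttm_bo_destroy() */
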
@@ -1101,7 +1102,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1101 1102
1102error_free: 1103error_free:
1103 list_del(&mapping->list); 1104 list_del(&mapping->list);
1105 spin_lock(&vm->it_lock);
1104 interval_tree_remove(&mapping->it, &vm->va); 1106 interval_tree_remove(&mapping->it, &vm->va);
1107 spin_unlock(&vm->it_lock);
1105 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1108 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1106 kfree(mapping); 1109 kfree(mapping);
1107 1110
@@ -1119,7 +1122,7 @@ error:
1119 * Remove a mapping of the BO at the specified addr from the VM. 1122 * Remove a mapping of the BO at the specified addr from the VM.
1120 * Returns 0 for success, error for failure. 1123 * Returns 0 for success, error for failure.
1121 * 1124 *
1122 * Object has to be reserved and gets unreserved by this function! 1125 * The object must be reserved and unreserved by the caller.
1123 */ 1126 */
1124int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 1127int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1125 struct amdgpu_bo_va *bo_va, 1128 struct amdgpu_bo_va *bo_va,
@@ -1130,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1130 bool valid = true; 1133 bool valid = true;
1131 1134
1132 saddr /= AMDGPU_GPU_PAGE_SIZE; 1135 saddr /= AMDGPU_GPU_PAGE_SIZE;
1133 1136 mutex_lock(&bo_va->mutex);
1134 list_for_each_entry(mapping, &bo_va->valids, list) { 1137 list_for_each_entry(mapping, &bo_va->valids, list) {
1135 if (mapping->it.start == saddr) 1138 if (mapping->it.start == saddr)
1136 break; 1139 break;
@@ -1145,20 +1148,24 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1145 } 1148 }
1146 1149
1147 if (&mapping->list == &bo_va->invalids) { 1150 if (&mapping->list == &bo_va->invalids) {
1148 amdgpu_bo_unreserve(bo_va->bo); 1151 mutex_unlock(&bo_va->mutex);
1149 return -ENOENT; 1152 return -ENOENT;
1150 } 1153 }
1151 } 1154 }
1152 1155 mutex_unlock(&bo_va->mutex);
1153 list_del(&mapping->list); 1156 list_del(&mapping->list);
1157 spin_lock(&vm->it_lock);
1154 interval_tree_remove(&mapping->it, &vm->va); 1158 interval_tree_remove(&mapping->it, &vm->va);
1159 spin_unlock(&vm->it_lock);
1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1160 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1156 1161
1157 if (valid) 1162 if (valid) {
1163 spin_lock(&vm->freed_lock);
1158 list_add(&mapping->list, &vm->freed); 1164 list_add(&mapping->list, &vm->freed);
1159 else 1165 spin_unlock(&vm->freed_lock);
1166 } else {
1160 kfree(mapping); 1167 kfree(mapping);
1161 amdgpu_bo_unreserve(bo_va->bo); 1168 }
1162 1169
1163 return 0; 1170 return 0;
1164} 1171}
@@ -1187,17 +1194,23 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1187 1194
1188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 1195 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1189 list_del(&mapping->list); 1196 list_del(&mapping->list);
1197 spin_lock(&vm->it_lock);
1190 interval_tree_remove(&mapping->it, &vm->va); 1198 interval_tree_remove(&mapping->it, &vm->va);
1199 spin_unlock(&vm->it_lock);
1191 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1200 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1201 spin_lock(&vm->freed_lock);
1192 list_add(&mapping->list, &vm->freed); 1202 list_add(&mapping->list, &vm->freed);
1203 spin_unlock(&vm->freed_lock);
1193 } 1204 }
1194 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 1205 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1195 list_del(&mapping->list); 1206 list_del(&mapping->list);
1207 spin_lock(&vm->it_lock);
1196 interval_tree_remove(&mapping->it, &vm->va); 1208 interval_tree_remove(&mapping->it, &vm->va);
1209 spin_unlock(&vm->it_lock);
1197 kfree(mapping); 1210 kfree(mapping);
1198 } 1211 }
1199
1200 fence_put(bo_va->last_pt_update); 1212 fence_put(bo_va->last_pt_update);
1213 mutex_destroy(&bo_va->mutex);
1201 kfree(bo_va); 1214 kfree(bo_va);
1202} 1215}
1203 1216
@@ -1241,15 +1254,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1241 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1254 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1242 vm->ids[i].id = 0; 1255 vm->ids[i].id = 0;
1243 vm->ids[i].flushed_updates = NULL; 1256 vm->ids[i].flushed_updates = NULL;
1244 vm->ids[i].last_id_use = NULL;
1245 } 1257 }
1246 mutex_init(&vm->mutex);
1247 vm->va = RB_ROOT; 1258 vm->va = RB_ROOT;
1248 spin_lock_init(&vm->status_lock); 1259 spin_lock_init(&vm->status_lock);
1249 INIT_LIST_HEAD(&vm->invalidated); 1260 INIT_LIST_HEAD(&vm->invalidated);
1250 INIT_LIST_HEAD(&vm->cleared); 1261 INIT_LIST_HEAD(&vm->cleared);
1251 INIT_LIST_HEAD(&vm->freed); 1262 INIT_LIST_HEAD(&vm->freed);
1252 1263 spin_lock_init(&vm->it_lock);
1264 spin_lock_init(&vm->freed_lock);
1253 pd_size = amdgpu_vm_directory_size(adev); 1265 pd_size = amdgpu_vm_directory_size(adev);
1254 pd_entries = amdgpu_vm_num_pdes(adev); 1266 pd_entries = amdgpu_vm_num_pdes(adev);
1255 1267
@@ -1269,8 +1281,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1269 NULL, NULL, &vm->page_directory); 1281 NULL, NULL, &vm->page_directory);
1270 if (r) 1282 if (r)
1271 return r; 1283 return r;
1272 1284 r = amdgpu_bo_reserve(vm->page_directory, false);
1285 if (r) {
1286 amdgpu_bo_unref(&vm->page_directory);
1287 vm->page_directory = NULL;
1288 return r;
1289 }
1273 r = amdgpu_vm_clear_bo(adev, vm->page_directory); 1290 r = amdgpu_vm_clear_bo(adev, vm->page_directory);
1291 amdgpu_bo_unreserve(vm->page_directory);
1274 if (r) { 1292 if (r) {
1275 amdgpu_bo_unref(&vm->page_directory); 1293 amdgpu_bo_unref(&vm->page_directory);
1276 vm->page_directory = NULL; 1294 vm->page_directory = NULL;
@@ -1313,11 +1331,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1313 1331
1314 amdgpu_bo_unref(&vm->page_directory); 1332 amdgpu_bo_unref(&vm->page_directory);
1315 fence_put(vm->page_directory_fence); 1333 fence_put(vm->page_directory_fence);
1316
1317 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1334 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1335 unsigned id = vm->ids[i].id;
1336
1337 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
1338 (long)vm, 0);
1318 fence_put(vm->ids[i].flushed_updates); 1339 fence_put(vm->ids[i].flushed_updates);
1319 fence_put(vm->ids[i].last_id_use);
1320 } 1340 }
1321 1341
1322 mutex_destroy(&vm->mutex); 1342}
1343
1344/**
1345 * amdgpu_vm_manager_fini - cleanup VM manager
1346 *
1347 * @adev: amdgpu_device pointer
1348 *
1349 * Cleanup the VM manager and free resources.
1350 */
1351void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1352{
1353 unsigned i;
1354
1355 for (i = 0; i < AMDGPU_NUM_VM; ++i)
1356 fence_put(adev->vm_manager.ids[i].active);
1323} 1357}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a1a35a5df8e7..57a2e347f04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6569 switch (state) { 6569 switch (state) {
6570 case AMDGPU_IRQ_STATE_DISABLE: 6570 case AMDGPU_IRQ_STATE_DISABLE:
6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6572 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6572 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6574 break; 6574 break;
6575 case AMDGPU_IRQ_STATE_ENABLE: 6575 case AMDGPU_IRQ_STATE_ENABLE:
6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6577 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6577 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6579 break; 6579 break;
6580 default: 6580 default:
@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6586 switch (state) { 6586 switch (state) {
6587 case AMDGPU_IRQ_STATE_DISABLE: 6587 case AMDGPU_IRQ_STATE_DISABLE:
6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6589 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6589 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6591 break; 6591 break;
6592 case AMDGPU_IRQ_STATE_ENABLE: 6592 case AMDGPU_IRQ_STATE_ENABLE:
6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6594 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6594 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6596 break; 6596 break;
6597 default: 6597 default:
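
The fix inverts the mask handling: THERM_INTH_MASK/THERM_INTL_MASK are mask bits, so setting them suppresses the thermal interrupt. DISABLE must therefore set the bit and ENABLE must clear it; the previous code did the opposite. Schematically:

    /* DISABLE: set the mask bit (interrupt suppressed). */
    cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
    /* ENABLE: clear the mask bit (interrupt delivered). */
    cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
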
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index cb0f7747e3dc..093599aba64b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1250 u32 pixel_period; 1250 u32 pixel_period;
1251 u32 line_time = 0; 1251 u32 line_time = 0;
1252 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1252 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1253 u32 tmp, wm_mask; 1253 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1254 1254
1255 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1255 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1256 pixel_period = 1000000 / (u32)mode->clock; 1256 pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1333 (adev->mode_info.disp_priority == 2)) { 1333 (adev->mode_info.disp_priority == 2)) {
1334 DRM_DEBUG_KMS("force priority to high\n"); 1334 DRM_DEBUG_KMS("force priority to high\n");
1335 } 1335 }
1336 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1336 } 1337 }
1337 1338
1338 /* select wm A */ 1339 /* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1357 amdgpu_crtc->line_time = line_time; 1358 amdgpu_crtc->line_time = line_time;
1358 amdgpu_crtc->wm_high = latency_watermark_a; 1359 amdgpu_crtc->wm_high = latency_watermark_a;
1359 amdgpu_crtc->wm_low = latency_watermark_b; 1360 amdgpu_crtc->wm_low = latency_watermark_b;
1361 /* Save number of lines the linebuffer leads before the scanout */
1362 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1360} 1363}
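
lb_vblank_lead_lines is the number of scanlines by which the line buffer runs ahead of the CRTC, computed as a ceiling division of buffer size by line width. With hypothetical numbers: a line buffer holding 4096 pixels ahead of a 1920-pixel-wide mode leads by DIV_ROUND_UP(4096, 1920) = 3 lines.

    /* DIV_ROUND_UP(a, b) == (a + b - 1) / b, e.g. (4096 + 1919) / 1920 == 3 */
    lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
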
1361 1364
1362/** 1365/**
@@ -3726,7 +3729,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3726 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3729 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3727 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3730 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3728 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3731 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3729 DRM_MODE_ENCODER_DAC); 3732 DRM_MODE_ENCODER_DAC, NULL);
3730 drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs); 3733 drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3731 break; 3734 break;
3732 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3735 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3737,15 +3740,15 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3737 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3740 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3738 amdgpu_encoder->rmx_type = RMX_FULL; 3741 amdgpu_encoder->rmx_type = RMX_FULL;
3739 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3742 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3740 DRM_MODE_ENCODER_LVDS); 3743 DRM_MODE_ENCODER_LVDS, NULL);
3741 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3744 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3742 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3745 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3743 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3746 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3744 DRM_MODE_ENCODER_DAC); 3747 DRM_MODE_ENCODER_DAC, NULL);
3745 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3748 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3746 } else { 3749 } else {
3747 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3750 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3748 DRM_MODE_ENCODER_TMDS); 3751 DRM_MODE_ENCODER_TMDS, NULL);
3749 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3752 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3750 } 3753 }
3751 drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs); 3754 drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
@@ -3763,13 +3766,13 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3763 amdgpu_encoder->is_ext_encoder = true; 3766 amdgpu_encoder->is_ext_encoder = true;
3764 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3767 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3765 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3768 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3766 DRM_MODE_ENCODER_LVDS); 3769 DRM_MODE_ENCODER_LVDS, NULL);
3767 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3770 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3768 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3771 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3769 DRM_MODE_ENCODER_DAC); 3772 DRM_MODE_ENCODER_DAC, NULL);
3770 else 3773 else
3771 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, 3774 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3772 DRM_MODE_ENCODER_TMDS); 3775 DRM_MODE_ENCODER_TMDS, NULL);
3773 drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs); 3776 drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3774 break; 3777 break;
3775 } 3778 }
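
The mechanical NULL additions across all three DCE versions track a drm_encoder_init() signature change: the function apparently gained a trailing printf-style name argument, and passing NULL asks the DRM core to generate a default encoder name. Post-change shape:

    drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_DAC, NULL /* auto-generated name */);
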
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 5af3721851d6..8701661a8868 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1238 u32 pixel_period; 1238 u32 pixel_period;
1239 u32 line_time = 0; 1239 u32 line_time = 0;
1240 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1240 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1241 u32 tmp, wm_mask; 1241 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1242 1242
1243 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1243 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1244 pixel_period = 1000000 / (u32)mode->clock; 1244 pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1321 (adev->mode_info.disp_priority == 2)) { 1321 (adev->mode_info.disp_priority == 2)) {
1322 DRM_DEBUG_KMS("force priority to high\n"); 1322 DRM_DEBUG_KMS("force priority to high\n");
1323 } 1323 }
1324 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1324 } 1325 }
1325 1326
1326 /* select wm A */ 1327 /* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1345 amdgpu_crtc->line_time = line_time; 1346 amdgpu_crtc->line_time = line_time;
1346 amdgpu_crtc->wm_high = latency_watermark_a; 1347 amdgpu_crtc->wm_high = latency_watermark_a;
1347 amdgpu_crtc->wm_low = latency_watermark_b; 1348 amdgpu_crtc->wm_low = latency_watermark_b;
1349 /* Save number of lines the linebuffer leads before the scanout */
1350 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1348} 1351}
1349 1352
1350/** 1353/**
@@ -3719,7 +3722,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3719 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3722 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3720 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3723 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3721 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3724 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3722 DRM_MODE_ENCODER_DAC); 3725 DRM_MODE_ENCODER_DAC, NULL);
3723 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); 3726 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3724 break; 3727 break;
3725 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3728 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3730,15 +3733,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3730 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3733 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3731 amdgpu_encoder->rmx_type = RMX_FULL; 3734 amdgpu_encoder->rmx_type = RMX_FULL;
3732 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3735 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3733 DRM_MODE_ENCODER_LVDS); 3736 DRM_MODE_ENCODER_LVDS, NULL);
3734 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3737 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3735 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3738 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3736 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3739 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3737 DRM_MODE_ENCODER_DAC); 3740 DRM_MODE_ENCODER_DAC, NULL);
3738 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3741 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3739 } else { 3742 } else {
3740 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3743 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3741 DRM_MODE_ENCODER_TMDS); 3744 DRM_MODE_ENCODER_TMDS, NULL);
3742 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3745 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3743 } 3746 }
3744 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); 3747 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
@@ -3756,13 +3759,13 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3756 amdgpu_encoder->is_ext_encoder = true; 3759 amdgpu_encoder->is_ext_encoder = true;
3757 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3760 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3758 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3761 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3759 DRM_MODE_ENCODER_LVDS); 3762 DRM_MODE_ENCODER_LVDS, NULL);
3760 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3763 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3761 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3764 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3762 DRM_MODE_ENCODER_DAC); 3765 DRM_MODE_ENCODER_DAC, NULL);
3763 else 3766 else
3764 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3767 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3765 DRM_MODE_ENCODER_TMDS); 3768 DRM_MODE_ENCODER_TMDS, NULL);
3766 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); 3769 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3767 break; 3770 break;
3768 } 3771 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4f7b49a6dc50..d0e128c24813 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1193 u32 pixel_period; 1193 u32 pixel_period;
1194 u32 line_time = 0; 1194 u32 line_time = 0;
1195 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1195 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1196 u32 tmp, wm_mask; 1196 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1197 1197
1198 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1198 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1199 pixel_period = 1000000 / (u32)mode->clock; 1199 pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1276 (adev->mode_info.disp_priority == 2)) { 1276 (adev->mode_info.disp_priority == 2)) {
1277 DRM_DEBUG_KMS("force priority to high\n"); 1277 DRM_DEBUG_KMS("force priority to high\n");
1278 } 1278 }
1279 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1279 } 1280 }
1280 1281
1281 /* select wm A */ 1282 /* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1302 amdgpu_crtc->line_time = line_time; 1303 amdgpu_crtc->line_time = line_time;
1303 amdgpu_crtc->wm_high = latency_watermark_a; 1304 amdgpu_crtc->wm_high = latency_watermark_a;
1304 amdgpu_crtc->wm_low = latency_watermark_b; 1305 amdgpu_crtc->wm_low = latency_watermark_b;
1306 /* Save number of lines the linebuffer leads before the scanout */
1307 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1305} 1308}
1306 1309
1307/** 1310/**
@@ -3656,7 +3659,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3656 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3659 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3657 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3660 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3658 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3661 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3659 DRM_MODE_ENCODER_DAC); 3662 DRM_MODE_ENCODER_DAC, NULL);
3660 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs); 3663 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3661 break; 3664 break;
3662 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3665 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3667,15 +3670,15 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3667 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3670 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3668 amdgpu_encoder->rmx_type = RMX_FULL; 3671 amdgpu_encoder->rmx_type = RMX_FULL;
3669 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3672 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3670 DRM_MODE_ENCODER_LVDS); 3673 DRM_MODE_ENCODER_LVDS, NULL);
3671 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3674 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3672 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3675 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3673 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3676 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3674 DRM_MODE_ENCODER_DAC); 3677 DRM_MODE_ENCODER_DAC, NULL);
3675 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3678 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3676 } else { 3679 } else {
3677 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3680 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3678 DRM_MODE_ENCODER_TMDS); 3681 DRM_MODE_ENCODER_TMDS, NULL);
3679 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3682 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3680 } 3683 }
3681 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs); 3684 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
@@ -3693,13 +3696,13 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3693 amdgpu_encoder->is_ext_encoder = true; 3696 amdgpu_encoder->is_ext_encoder = true;
3694 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3697 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3695 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3698 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3696 DRM_MODE_ENCODER_LVDS); 3699 DRM_MODE_ENCODER_LVDS, NULL);
3697 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3700 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3698 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3701 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3699 DRM_MODE_ENCODER_DAC); 3702 DRM_MODE_ENCODER_DAC, NULL);
3700 else 3703 else
3701 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3704 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3702 DRM_MODE_ENCODER_TMDS); 3705 DRM_MODE_ENCODER_TMDS, NULL);
3703 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs); 3706 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3704 break; 3707 break;
3705 } 3708 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6776cf756d40..e1dcab98e249 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, 268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, 269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, 270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
271 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
272 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, 271 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
273 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, 272 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
274 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, 273 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
296 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, 295 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
297 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, 296 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
298 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, 297 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
299 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
300 mmPCIE_DATA, 0x000f0000, 0x00000000,
301 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
302 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
303 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 298 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
304}; 299};
305 300
@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1000 adev->gfx.config.max_cu_per_sh = 16; 995 adev->gfx.config.max_cu_per_sh = 16;
1001 adev->gfx.config.max_sh_per_se = 1; 996 adev->gfx.config.max_sh_per_se = 1;
1002 adev->gfx.config.max_backends_per_se = 4; 997 adev->gfx.config.max_backends_per_se = 4;
1003 adev->gfx.config.max_texture_channel_caches = 8; 998 adev->gfx.config.max_texture_channel_caches = 16;
1004 adev->gfx.config.max_gprs = 256; 999 adev->gfx.config.max_gprs = 256;
1005 adev->gfx.config.max_gs_threads = 32; 1000 adev->gfx.config.max_gs_threads = 32;
1006 adev->gfx.config.max_hw_contexts = 8; 1001 adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1613 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1608 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1614 } 1609 }
1615 case CHIP_FIJI: 1610 case CHIP_FIJI:
1611 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1612 switch (reg_offset) {
1613 case 0:
1614 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1615 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1616 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1617 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1618 break;
1619 case 1:
1620 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1621 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1622 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1623 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1624 break;
1625 case 2:
1626 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1627 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1628 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1629 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1630 break;
1631 case 3:
1632 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1633 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1635 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1636 break;
1637 case 4:
1638 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1639 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1640 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1641 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1642 break;
1643 case 5:
1644 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1645 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1646 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1647 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1648 break;
1649 case 6:
1650 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1651 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1652 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1653 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1654 break;
1655 case 7:
1656 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1657 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1658 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1659 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1660 break;
1661 case 8:
1662 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1663 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1664 break;
1665 case 9:
1666 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1667 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1668 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1669 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1670 break;
1671 case 10:
1672 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1673 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1674 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1675 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1676 break;
1677 case 11:
1678 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1679 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1680 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1681 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1682 break;
1683 case 12:
1684 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1685 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1686 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1687 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1688 break;
1689 case 13:
1690 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1691 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1692 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1693 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1694 break;
1695 case 14:
1696 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1697 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1698 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1699 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1700 break;
1701 case 15:
1702 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1703 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1704 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1705 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1706 break;
1707 case 16:
1708 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1709 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1710 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1711 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1712 break;
1713 case 17:
1714 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1715 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1716 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1717 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1718 break;
1719 case 18:
1720 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1721 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1722 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1723 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1724 break;
1725 case 19:
1726 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1727 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1728 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1729 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1730 break;
1731 case 20:
1732 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1733 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1734 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1735 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1736 break;
1737 case 21:
1738 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1739 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1740 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1741 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1742 break;
1743 case 22:
1744 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1745 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1746 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1747 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1748 break;
1749 case 23:
1750 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1751 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1752 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1753 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1754 break;
1755 case 24:
1756 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1757 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1758 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1759 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1760 break;
1761 case 25:
1762 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1763 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1764 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1765 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1766 break;
1767 case 26:
1768 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1769 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1770 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1771 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1772 break;
1773 case 27:
1774 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1775 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1776 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1777 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1778 break;
1779 case 28:
1780 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1781 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1782 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1783 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1784 break;
1785 case 29:
1786 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1787 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1788 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1789 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1790 break;
1791 case 30:
1792 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1793 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1794 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1795 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1796 break;
1797 default:
1798 gb_tile_moden = 0;
1799 break;
1800 }
1801 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
1802 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
1803 }
1804 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1805 switch (reg_offset) {
1806 case 0:
1807 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1808 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1809 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1810 NUM_BANKS(ADDR_SURF_8_BANK));
1811 break;
1812 case 1:
1813 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1816 NUM_BANKS(ADDR_SURF_8_BANK));
1817 break;
1818 case 2:
1819 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1820 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1821 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1822 NUM_BANKS(ADDR_SURF_8_BANK));
1823 break;
1824 case 3:
1825 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1826 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1827 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1828 NUM_BANKS(ADDR_SURF_8_BANK));
1829 break;
1830 case 4:
1831 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1832 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1833 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1834 NUM_BANKS(ADDR_SURF_8_BANK));
1835 break;
1836 case 5:
1837 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1838 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1839 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1840 NUM_BANKS(ADDR_SURF_8_BANK));
1841 break;
1842 case 6:
1843 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1844 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1845 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1846 NUM_BANKS(ADDR_SURF_8_BANK));
1847 break;
1848 case 8:
1849 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1850 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1851 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1852 NUM_BANKS(ADDR_SURF_8_BANK));
1853 break;
1854 case 9:
1855 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1856 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1857 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1858 NUM_BANKS(ADDR_SURF_8_BANK));
1859 break;
1860 case 10:
1861 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1862 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1863 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1864 NUM_BANKS(ADDR_SURF_8_BANK));
1865 break;
1866 case 11:
1867 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1868 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1869 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1870 NUM_BANKS(ADDR_SURF_8_BANK));
1871 break;
1872 case 12:
1873 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1874 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1875 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1876 NUM_BANKS(ADDR_SURF_8_BANK));
1877 break;
1878 case 13:
1879 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1880 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1881 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1882 NUM_BANKS(ADDR_SURF_8_BANK));
1883 break;
1884 case 14:
1885 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1886 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1887 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1888 NUM_BANKS(ADDR_SURF_4_BANK));
1889 break;
1890 case 7:
1891 /* unused idx */
1892 continue;
1893 default:
1894 gb_tile_moden = 0;
1895 break;
1896 }
1897 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
1898 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1899 }
1900 break;
1616 case CHIP_TONGA: 1901 case CHIP_TONGA:
1617 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1902 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1618 switch (reg_offset) { 1903 switch (reg_offset) {
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2971 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 3256 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2972 switch (adev->asic_type) { 3257 switch (adev->asic_type) {
2973 case CHIP_TONGA: 3258 case CHIP_TONGA:
2974 case CHIP_FIJI:
2975 amdgpu_ring_write(ring, 0x16000012); 3259 amdgpu_ring_write(ring, 0x16000012);
2976 amdgpu_ring_write(ring, 0x0000002A); 3260 amdgpu_ring_write(ring, 0x0000002A);
2977 break; 3261 break;
3262 case CHIP_FIJI:
3263 amdgpu_ring_write(ring, 0x3a00161a);
3264 amdgpu_ring_write(ring, 0x0000002e);
3265 break;
2978 case CHIP_TOPAZ: 3266 case CHIP_TOPAZ:
2979 case CHIP_CARRIZO: 3267 case CHIP_CARRIZO:
2980 amdgpu_ring_write(ring, 0x00000002); 3268 amdgpu_ring_write(ring, 0x00000002);
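
The Fiji tiling table added above follows the same shape as the existing ASIC cases: a switch builds one 32-bit tile-mode word per index from bitfield macros, caches it in adev->gfx.config, and mirrors it into the GB_TILE_MODE registers. A condensed sketch of that pattern (example_tile_modes and the helper are hypothetical, not part of the patch):

static const u32 example_tile_modes[] = {
	ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
		PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
		TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
		MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING),
	/* ... one entry per reg_offset, 0 for unused slots ... */
};

static void example_program_tile_modes(struct amdgpu_device *adev)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(example_tile_modes); i++) {
		/* cache for later surface-address computations ... */
		adev->gfx.config.tile_mode_array[i] = example_tile_modes[i];
		/* ... and program the hardware copy */
		WREG32(mmGB_TILE_MODE0 + i, example_tile_modes[i]);
	}
}
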
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85bbcdc73fff..ed8abb58a785 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -40,7 +40,7 @@
40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42 42
43MODULE_FIRMWARE("radeon/boniare_mc.bin"); 43MODULE_FIRMWARE("radeon/bonaire_mc.bin");
44MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 44MODULE_FIRMWARE("radeon/hawaii_mc.bin");
45 45
46/** 46/**
@@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
504 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
504 WREG32(mmVM_L2_CNTL, tmp); 505 WREG32(mmVM_L2_CNTL, tmp);
505 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 506 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
506 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 507 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@@ -512,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
512 WREG32(mmVM_L2_CNTL3, tmp); 513 WREG32(mmVM_L2_CNTL3, tmp);
513 /* setup context0 */ 514 /* setup context0 */
514 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); 515 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
515 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); 516 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
516 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); 517 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
517 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 518 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
518 (u32)(adev->dummy_page.addr >> 12)); 519 (u32)(adev->dummy_page.addr >> 12));
@@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
960 961
961static int gmc_v7_0_sw_fini(void *handle) 962static int gmc_v7_0_sw_fini(void *handle)
962{ 963{
963 int i;
964 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 964 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
965 965
966 if (adev->vm_manager.enabled) { 966 if (adev->vm_manager.enabled) {
967 for (i = 0; i < AMDGPU_NUM_VM; ++i) 967 amdgpu_vm_manager_fini(adev);
968 fence_put(adev->vm_manager.active[i]);
969 gmc_v7_0_vm_fini(adev); 968 gmc_v7_0_vm_fini(adev);
970 adev->vm_manager.enabled = false; 969 adev->vm_manager.enabled = false;
971 } 970 }
@@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
1010 1009
1011static int gmc_v7_0_suspend(void *handle) 1010static int gmc_v7_0_suspend(void *handle)
1012{ 1011{
1013 int i;
1014 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1012 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1015 1013
1016 if (adev->vm_manager.enabled) { 1014 if (adev->vm_manager.enabled) {
1017 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1015 amdgpu_vm_manager_fini(adev);
1018 fence_put(adev->vm_manager.active[i]);
1019 gmc_v7_0_vm_fini(adev); 1016 gmc_v7_0_vm_fini(adev);
1020 adev->vm_manager.enabled = false; 1017 adev->vm_manager.enabled = false;
1021 } 1018 }
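
The gmc_v7_0 fini/suspend hunks replace the open-coded loop that dropped each active VM fence with a single amdgpu_vm_manager_fini() call. A minimal sketch of what such a helper would contain, assuming it merely centralizes the loop it replaces (the real implementation is not shown in this diff):

void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* drop the reference on every fence still tracked per VM id */
	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.active[i]);
}
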
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1bcc4e74e3b4..d39028440814 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
629 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 629 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
630 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 630 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
631 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 631 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
632 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
632 WREG32(mmVM_L2_CNTL, tmp); 633 WREG32(mmVM_L2_CNTL, tmp);
633 tmp = RREG32(mmVM_L2_CNTL2); 634 tmp = RREG32(mmVM_L2_CNTL2);
634 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 635 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -656,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
656 WREG32(mmVM_L2_CNTL4, tmp); 657 WREG32(mmVM_L2_CNTL4, tmp);
657 /* setup context0 */ 658 /* setup context0 */
658 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); 659 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
659 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); 660 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
660 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); 661 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
661 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 662 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
662 (u32)(adev->dummy_page.addr >> 12)); 663 (u32)(adev->dummy_page.addr >> 12));
@@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
979 980
980static int gmc_v8_0_sw_fini(void *handle) 981static int gmc_v8_0_sw_fini(void *handle)
981{ 982{
982 int i;
983 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 983 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
984 984
985 if (adev->vm_manager.enabled) { 985 if (adev->vm_manager.enabled) {
986 for (i = 0; i < AMDGPU_NUM_VM; ++i) 986 amdgpu_vm_manager_fini(adev);
987 fence_put(adev->vm_manager.active[i]);
988 gmc_v8_0_vm_fini(adev); 987 gmc_v8_0_vm_fini(adev);
989 adev->vm_manager.enabled = false; 988 adev->vm_manager.enabled = false;
990 } 989 }
@@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
1031 1030
1032static int gmc_v8_0_suspend(void *handle) 1031static int gmc_v8_0_suspend(void *handle)
1033{ 1032{
1034 int i;
1035 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1033 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1036 1034
1037 if (adev->vm_manager.enabled) { 1035 if (adev->vm_manager.enabled) {
1038 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1036 amdgpu_vm_manager_fini(adev);
1039 fence_put(adev->vm_manager.active[i]);
1040 gmc_v8_0_vm_fini(adev); 1037 gmc_v8_0_vm_fini(adev);
1041 adev->vm_manager.enabled = false; 1038 adev->vm_manager.enabled = false;
1042 } 1039 }
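
Both GMC hunks rely on the same read-modify-write idiom: REG_SET_FIELD() masks and shifts a named field into an in-memory register image, and the image is written back once, so all fields land in a single WREG32(). A sketch of that idiom using fields from the hunk above (the helper name is hypothetical):

static void example_set_l2_cntl(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmVM_L2_CNTL);

	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);	/* one write commits every field */
}
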
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6a52db6ad8d7..370c6c9d81c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -40,6 +40,9 @@
40 40
41#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 41#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
42#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 42#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
43#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
44#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
45#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
43 46
44#define VCE_V3_0_FW_SIZE (384 * 1024) 47#define VCE_V3_0_FW_SIZE (384 * 1024)
45#define VCE_V3_0_STACK_SIZE (64 * 1024) 48#define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
130 133
131 /* set BUSY flag */ 134 /* set BUSY flag */
132 WREG32_P(mmVCE_STATUS, 1, ~1); 135 WREG32_P(mmVCE_STATUS, 1, ~1);
133 136 if (adev->asic_type >= CHIP_STONEY)
134 WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, 137 WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
135 ~VCE_VCPU_CNTL__CLK_EN_MASK); 138 else
139 WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
140 ~VCE_VCPU_CNTL__CLK_EN_MASK);
136 141
137 WREG32_P(mmVCE_SOFT_RESET, 142 WREG32_P(mmVCE_SOFT_RESET,
138 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 143 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
@@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
391 WREG32(mmVCE_LMI_SWAP_CNTL, 0); 396 WREG32(mmVCE_LMI_SWAP_CNTL, 0);
392 WREG32(mmVCE_LMI_SWAP_CNTL1, 0); 397 WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
393 WREG32(mmVCE_LMI_VM_CTRL, 0); 398 WREG32(mmVCE_LMI_VM_CTRL, 0);
394 399 if (adev->asic_type >= CHIP_STONEY) {
395 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); 400 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
401 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
402 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
403 } else
404 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
396 offset = AMDGPU_VCE_FIRMWARE_OFFSET; 405 offset = AMDGPU_VCE_FIRMWARE_OFFSET;
397 size = VCE_V3_0_FW_SIZE; 406 size = VCE_V3_0_FW_SIZE;
398 WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); 407 WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
@@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
576 struct amdgpu_iv_entry *entry) 585 struct amdgpu_iv_entry *entry)
577{ 586{
578 DRM_DEBUG("IH: VCE\n"); 587 DRM_DEBUG("IH: VCE\n");
588
589 WREG32_P(mmVCE_SYS_INT_STATUS,
590 VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
591 ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
592
579 switch (entry->src_data) { 593 switch (entry->src_data) {
580 case 0: 594 case 0:
581 amdgpu_fence_process(&adev->vce.ring[0]); 595 amdgpu_fence_process(&adev->vce.ring[0]);
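
The new interrupt-handler write above uses the masked-write helper: assuming the usual radeon/amdgpu definition, WREG32_P(reg, v, mask) reads the register, keeps only the bits in mask, ORs in v, and writes the result back. Passing the interrupt bit together with its complement therefore sets exactly that bit, acknowledging the VCE trap interrupt without disturbing neighbouring status bits. A sketch (hypothetical helper name):

static void example_ack_vce_trap_irq(struct amdgpu_device *adev)
{
	/* set only the trap-interrupt bit, preserve everything else */
	WREG32_P(mmVCE_SYS_INT_STATUS,
		 VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
		 ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
}
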
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
16 TP_ARGS(sched_job), 16 TP_ARGS(sched_job),
17 TP_STRUCT__entry( 17 TP_STRUCT__entry(
18 __field(struct amd_sched_entity *, entity) 18 __field(struct amd_sched_entity *, entity)
19 __field(struct amd_sched_job *, sched_job)
20 __field(struct fence *, fence)
19 __field(const char *, name) 21 __field(const char *, name)
20 __field(u32, job_count) 22 __field(u32, job_count)
21 __field(int, hw_job_count) 23 __field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
23 25
24 TP_fast_assign( 26 TP_fast_assign(
25 __entry->entity = sched_job->s_entity; 27 __entry->entity = sched_job->s_entity;
28 __entry->sched_job = sched_job;
29 __entry->fence = &sched_job->s_fence->base;
26 __entry->name = sched_job->sched->name; 30 __entry->name = sched_job->sched->name;
27 __entry->job_count = kfifo_len( 31 __entry->job_count = kfifo_len(
28 &sched_job->s_entity->job_queue) / sizeof(sched_job); 32 &sched_job->s_entity->job_queue) / sizeof(sched_job);
29 __entry->hw_job_count = atomic_read( 33 __entry->hw_job_count = atomic_read(
30 &sched_job->sched->hw_rq_count); 34 &sched_job->sched->hw_rq_count);
31 ), 35 ),
32 TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", 36 TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
33 __entry->entity, __entry->name, __entry->job_count, 37 __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
34 __entry->hw_job_count) 38 __entry->job_count, __entry->hw_job_count)
35); 39);
40
41TRACE_EVENT(amd_sched_process_job,
42 TP_PROTO(struct amd_sched_fence *fence),
43 TP_ARGS(fence),
44 TP_STRUCT__entry(
45 __field(struct fence *, fence)
46 ),
47
48 TP_fast_assign(
49 __entry->fence = &fence->base;
50 ),
51 TP_printk("fence=%p signaled", __entry->fence)
52);
53
36#endif 54#endif
37 55
38/* This part must be outside protection */ 56/* This part must be outside protection */
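
For readers new to the tracepoint macros above: TP_STRUCT__entry() declares the fields recorded into the trace ring buffer, TP_fast_assign() fills them at the trace call site, and TP_printk() formats them when the buffer is read back. A stripped-down sketch of the same anatomy (the event name is illustrative only):

TRACE_EVENT(example_fence_event,
	TP_PROTO(struct fence *fence),
	TP_ARGS(fence),
	TP_STRUCT__entry(
		__field(struct fence *, fence)	/* stored per trace hit */
	),
	TP_fast_assign(
		__entry->fence = fence;		/* runs at the call site */
	),
	TP_printk("fence=%p", __entry->fence)	/* runs at read time */
);

Assuming this header's TRACE_SYSTEM is gpu_sched, the new amd_sched_process_job event would then be toggled under events/gpu_sched/ in tracefs.
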
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..3a4820e863ec 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,10 +30,12 @@
30#define CREATE_TRACE_POINTS 30#define CREATE_TRACE_POINTS
31#include "gpu_sched_trace.h" 31#include "gpu_sched_trace.h"
32 32
33static struct amd_sched_job * 33static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
34amd_sched_entity_pop_job(struct amd_sched_entity *entity);
35static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
36 35
36struct kmem_cache *sched_fence_slab;
37atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
38
37/* Initialize a given run queue struct */ 39/* Initialize a given run queue struct */
38static void amd_sched_rq_init(struct amd_sched_rq *rq) 40static void amd_sched_rq_init(struct amd_sched_rq *rq)
39{ 41{
@@ -61,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
61} 63}
62 64
63/** 65/**
64 * Select next job from a specified run queue with round robin policy. 66 * Select an entity which could provide a job to run
65 * Return NULL if nothing available. 67 *
 68 * @rq The run queue to check.
69 *
70 * Try to find a ready entity, returns NULL if none found.
66 */ 71 */
67static struct amd_sched_job * 72static struct amd_sched_entity *
68amd_sched_rq_select_job(struct amd_sched_rq *rq) 73amd_sched_rq_select_entity(struct amd_sched_rq *rq)
69{ 74{
70 struct amd_sched_entity *entity; 75 struct amd_sched_entity *entity;
71 struct amd_sched_job *sched_job;
72 76
73 spin_lock(&rq->lock); 77 spin_lock(&rq->lock);
74 78
75 entity = rq->current_entity; 79 entity = rq->current_entity;
76 if (entity) { 80 if (entity) {
77 list_for_each_entry_continue(entity, &rq->entities, list) { 81 list_for_each_entry_continue(entity, &rq->entities, list) {
78 sched_job = amd_sched_entity_pop_job(entity); 82 if (amd_sched_entity_is_ready(entity)) {
79 if (sched_job) {
80 rq->current_entity = entity; 83 rq->current_entity = entity;
81 spin_unlock(&rq->lock); 84 spin_unlock(&rq->lock);
82 return sched_job; 85 return entity;
83 } 86 }
84 } 87 }
85 } 88 }
86 89
87 list_for_each_entry(entity, &rq->entities, list) { 90 list_for_each_entry(entity, &rq->entities, list) {
88 91
89 sched_job = amd_sched_entity_pop_job(entity); 92 if (amd_sched_entity_is_ready(entity)) {
90 if (sched_job) {
91 rq->current_entity = entity; 93 rq->current_entity = entity;
92 spin_unlock(&rq->lock); 94 spin_unlock(&rq->lock);
93 return sched_job; 95 return entity;
94 } 96 }
95 97
96 if (entity == rq->current_entity) 98 if (entity == rq->current_entity)
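
The rewritten selector above keeps the round-robin policy but returns a ready entity instead of popping a job. Its core is a resume-then-wrap scan: continue from the entity after the last pick, and if nothing past it is ready, scan from the list head until the old pick is reached again. A sketch with the rq locking elided (helper name hypothetical):

static struct amd_sched_entity *
example_rr_pick(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity = rq->current_entity;

	if (entity) {	/* resume after the last served entity */
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity))
				goto found;
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {	/* wrap */
		if (amd_sched_entity_is_ready(entity))
			goto found;
		if (entity == rq->current_entity)
			break;	/* completed a full lap, nothing ready */
	}
	return NULL;

found:
	rq->current_entity = entity;	/* remember for the next round */
	return entity;
}
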
@@ -174,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
174} 176}
175 177
176/** 178/**
179 * Check if entity is ready
180 *
 181 * @entity The pointer to a valid scheduler entity
182 *
183 * Return true if entity could provide a job.
184 */
185static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
186{
187 if (kfifo_is_empty(&entity->job_queue))
188 return false;
189
190 if (ACCESS_ONCE(entity->dependency))
191 return false;
192
193 return true;
194}
195
196/**
177 * Destroy a context entity 197 * Destroy a context entity
178 * 198 *
179 * @sched Pointer to scheduler instance 199 * @sched Pointer to scheduler instance
@@ -208,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
208 amd_sched_wakeup(entity->sched); 228 amd_sched_wakeup(entity->sched);
209} 229}
210 230
231static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
232{
233 struct amd_gpu_scheduler *sched = entity->sched;
234 struct fence * fence = entity->dependency;
235 struct amd_sched_fence *s_fence;
236
237 if (fence->context == entity->fence_context) {
238 /* We can ignore fences from ourself */
239 fence_put(entity->dependency);
240 return false;
241 }
242
243 s_fence = to_amd_sched_fence(fence);
244 if (s_fence && s_fence->sched == sched) {
245 /* Fence is from the same scheduler */
246 if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
247 /* Ignore it when it is already scheduled */
248 fence_put(entity->dependency);
249 return false;
250 }
251
252 /* Wait for fence to be scheduled */
253 entity->cb.func = amd_sched_entity_wakeup;
254 list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
255 return true;
256 }
257
258 if (!fence_add_callback(entity->dependency, &entity->cb,
259 amd_sched_entity_wakeup))
260 return true;
261
262 fence_put(entity->dependency);
263 return false;
264}
265
211static struct amd_sched_job * 266static struct amd_sched_job *
212amd_sched_entity_pop_job(struct amd_sched_entity *entity) 267amd_sched_entity_pop_job(struct amd_sched_entity *entity)
213{ 268{
214 struct amd_gpu_scheduler *sched = entity->sched; 269 struct amd_gpu_scheduler *sched = entity->sched;
215 struct amd_sched_job *sched_job; 270 struct amd_sched_job *sched_job;
216 271
217 if (ACCESS_ONCE(entity->dependency))
218 return NULL;
219
220 if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) 272 if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
221 return NULL; 273 return NULL;
222 274
223 while ((entity->dependency = sched->ops->dependency(sched_job))) { 275 while ((entity->dependency = sched->ops->dependency(sched_job)))
224 276 if (amd_sched_entity_add_dependency_cb(entity))
225 if (entity->dependency->context == entity->fence_context) {
226 /* We can ignore fences from ourself */
227 fence_put(entity->dependency);
228 continue;
229 }
230
231 if (fence_add_callback(entity->dependency, &entity->cb,
232 amd_sched_entity_wakeup))
233 fence_put(entity->dependency);
234 else
235 return NULL; 277 return NULL;
236 }
237 278
238 return sched_job; 279 return sched_job;
239} 280}
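
amd_sched_entity_pop_job() above now defers to amd_sched_entity_add_dependency_cb(), which handles three cases: a fence from the entity itself is dropped, a fence from the same scheduler only needs to reach its "scheduled" state, and any other fence gets a wakeup callback. The last case is the standard fence-callback race handling, reduced here to a sketch (hypothetical helper name):

static bool example_arm_dependency(struct amd_sched_entity *entity)
{
	/* fence_add_callback() returns 0 when the callback is installed */
	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;	/* job stays queued until the fence fires */

	/* fence already signaled: drop our reference, job can run now */
	fence_put(entity->dependency);
	return false;
}
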
@@ -247,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
247 */ 288 */
248static bool amd_sched_entity_in(struct amd_sched_job *sched_job) 289static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
249{ 290{
291 struct amd_gpu_scheduler *sched = sched_job->sched;
250 struct amd_sched_entity *entity = sched_job->s_entity; 292 struct amd_sched_entity *entity = sched_job->s_entity;
251 bool added, first = false; 293 bool added, first = false;
252 294
@@ -261,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
261 303
262 /* first job wakes up scheduler */ 304 /* first job wakes up scheduler */
263 if (first) 305 if (first)
264 amd_sched_wakeup(sched_job->sched); 306 amd_sched_wakeup(sched);
265 307
266 return added; 308 return added;
267} 309}
@@ -273,22 +315,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
273 * 315 *
274 * Returns 0 for success, negative error code otherwise. 316 * Returns 0 for success, negative error code otherwise.
275 */ 317 */
276int amd_sched_entity_push_job(struct amd_sched_job *sched_job) 318void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
277{ 319{
278 struct amd_sched_entity *entity = sched_job->s_entity; 320 struct amd_sched_entity *entity = sched_job->s_entity;
279 struct amd_sched_fence *fence = amd_sched_fence_create(
280 entity, sched_job->owner);
281
282 if (!fence)
283 return -ENOMEM;
284
285 fence_get(&fence->base);
286 sched_job->s_fence = fence;
287 321
322 trace_amd_sched_job(sched_job);
288 wait_event(entity->sched->job_scheduled, 323 wait_event(entity->sched->job_scheduled,
289 amd_sched_entity_in(sched_job)); 324 amd_sched_entity_in(sched_job));
290 trace_amd_sched_job(sched_job);
291 return 0;
292} 325}
293 326
294/** 327/**
@@ -310,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
310} 343}
311 344
312/** 345/**
313 * Select next to run 346 * Select next entity to process
314*/ 347*/
315static struct amd_sched_job * 348static struct amd_sched_entity *
316amd_sched_select_job(struct amd_gpu_scheduler *sched) 349amd_sched_select_entity(struct amd_gpu_scheduler *sched)
317{ 350{
318 struct amd_sched_job *sched_job; 351 struct amd_sched_entity *entity;
319 352
320 if (!amd_sched_ready(sched)) 353 if (!amd_sched_ready(sched))
321 return NULL; 354 return NULL;
322 355
323 /* Kernel run queue has higher priority than normal run queue*/ 356 /* Kernel run queue has higher priority than normal run queue*/
324 sched_job = amd_sched_rq_select_job(&sched->kernel_rq); 357 entity = amd_sched_rq_select_entity(&sched->kernel_rq);
325 if (sched_job == NULL) 358 if (entity == NULL)
326 sched_job = amd_sched_rq_select_job(&sched->sched_rq); 359 entity = amd_sched_rq_select_entity(&sched->sched_rq);
327 360
328 return sched_job; 361 return entity;
329} 362}
330 363
331static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) 364static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -343,6 +376,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
343 list_del_init(&s_fence->list); 376 list_del_init(&s_fence->list);
344 spin_unlock_irqrestore(&sched->fence_list_lock, flags); 377 spin_unlock_irqrestore(&sched->fence_list_lock, flags);
345 } 378 }
379 trace_amd_sched_process_job(s_fence);
346 fence_put(&s_fence->base); 380 fence_put(&s_fence->base);
347 wake_up_interruptible(&sched->wake_up_worker); 381 wake_up_interruptible(&sched->wake_up_worker);
348} 382}
@@ -386,13 +420,16 @@ static int amd_sched_main(void *param)
386 unsigned long flags; 420 unsigned long flags;
387 421
388 wait_event_interruptible(sched->wake_up_worker, 422 wait_event_interruptible(sched->wake_up_worker,
389 kthread_should_stop() || 423 (entity = amd_sched_select_entity(sched)) ||
390 (sched_job = amd_sched_select_job(sched))); 424 kthread_should_stop());
391 425
426 if (!entity)
427 continue;
428
429 sched_job = amd_sched_entity_pop_job(entity);
392 if (!sched_job) 430 if (!sched_job)
393 continue; 431 continue;
394 432
395 entity = sched_job->s_entity;
396 s_fence = sched_job->s_fence; 433 s_fence = sched_job->s_fence;
397 434
398 if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { 435 if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
@@ -405,6 +442,7 @@ static int amd_sched_main(void *param)
405 442
406 atomic_inc(&sched->hw_rq_count); 443 atomic_inc(&sched->hw_rq_count);
407 fence = sched->ops->run_job(sched_job); 444 fence = sched->ops->run_job(sched_job);
445 amd_sched_fence_scheduled(s_fence);
408 if (fence) { 446 if (fence) {
409 r = fence_add_callback(fence, &s_fence->cb, 447 r = fence_add_callback(fence, &s_fence->cb,
410 amd_sched_process_job); 448 amd_sched_process_job);
@@ -450,6 +488,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
450 init_waitqueue_head(&sched->wake_up_worker); 488 init_waitqueue_head(&sched->wake_up_worker);
451 init_waitqueue_head(&sched->job_scheduled); 489 init_waitqueue_head(&sched->job_scheduled);
452 atomic_set(&sched->hw_rq_count, 0); 490 atomic_set(&sched->hw_rq_count, 0);
491 if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
492 sched_fence_slab = kmem_cache_create(
493 "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
494 SLAB_HWCACHE_ALIGN, NULL);
495 if (!sched_fence_slab)
496 return -ENOMEM;
497 }
453 498
 454 /* Each scheduler will run on a seperate kernel thread */ 499 /* Each scheduler will run on a seperate kernel thread */
 455 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 500 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +515,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
470{ 515{
471 if (sched->thread) 516 if (sched->thread)
472 kthread_stop(sched->thread); 517 kthread_stop(sched->thread);
518 if (atomic_dec_and_test(&sched_fence_slab_ref))
519 kmem_cache_destroy(sched_fence_slab);
473} 520}
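
The init/fini hunks above introduce a refcounted global slab for scheduler fences: the first amd_sched_init() creates the cache, the last amd_sched_fini() destroys it. The pairing, isolated (hypothetical helper names; unwinding the refcount on allocation failure is elided, as in the patch):

static int example_fence_slab_get(void)
{
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create("amd_sched_fence",
				sizeof(struct amd_sched_fence), 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}
	return 0;
}

static void example_fence_slab_put(void)
{
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}
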
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 929e9aced041..a0f0ae53aacd 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,9 +27,14 @@
27#include <linux/kfifo.h> 27#include <linux/kfifo.h>
28#include <linux/fence.h> 28#include <linux/fence.h>
29 29
30#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
31
30struct amd_gpu_scheduler; 32struct amd_gpu_scheduler;
31struct amd_sched_rq; 33struct amd_sched_rq;
32 34
35extern struct kmem_cache *sched_fence_slab;
36extern atomic_t sched_fence_slab_ref;
37
33/** 38/**
34 * A scheduler entity is a wrapper around a job queue or a group 39 * A scheduler entity is a wrapper around a job queue or a group
35 * of other entities. Entities take turns emitting jobs from their 40 * of other entities. Entities take turns emitting jobs from their
@@ -65,6 +70,7 @@ struct amd_sched_rq {
65struct amd_sched_fence { 70struct amd_sched_fence {
66 struct fence base; 71 struct fence base;
67 struct fence_cb cb; 72 struct fence_cb cb;
73 struct list_head scheduled_cb;
68 struct amd_gpu_scheduler *sched; 74 struct amd_gpu_scheduler *sched;
69 spinlock_t lock; 75 spinlock_t lock;
70 void *owner; 76 void *owner;
@@ -76,7 +82,6 @@ struct amd_sched_job {
76 struct amd_gpu_scheduler *sched; 82 struct amd_gpu_scheduler *sched;
77 struct amd_sched_entity *s_entity; 83 struct amd_sched_entity *s_entity;
78 struct amd_sched_fence *s_fence; 84 struct amd_sched_fence *s_fence;
79 void *owner;
80}; 85};
81 86
82extern const struct fence_ops amd_sched_fence_ops; 87extern const struct fence_ops amd_sched_fence_ops;
@@ -128,11 +133,11 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
128 uint32_t jobs); 133 uint32_t jobs);
129void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 134void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
130 struct amd_sched_entity *entity); 135 struct amd_sched_entity *entity);
131int amd_sched_entity_push_job(struct amd_sched_job *sched_job); 136void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
132 137
133struct amd_sched_fence *amd_sched_fence_create( 138struct amd_sched_fence *amd_sched_fence_create(
134 struct amd_sched_entity *s_entity, void *owner); 139 struct amd_sched_entity *s_entity, void *owner);
140void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
135void amd_sched_fence_signal(struct amd_sched_fence *fence); 141void amd_sched_fence_signal(struct amd_sched_fence *fence);
136 142
137
138#endif 143#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638094f4..87c78eecea64 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,9 +32,11 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
32 struct amd_sched_fence *fence = NULL; 32 struct amd_sched_fence *fence = NULL;
33 unsigned seq; 33 unsigned seq;
34 34
35 fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); 35 fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
36 if (fence == NULL) 36 if (fence == NULL)
37 return NULL; 37 return NULL;
38
39 INIT_LIST_HEAD(&fence->scheduled_cb);
38 fence->owner = owner; 40 fence->owner = owner;
39 fence->sched = s_entity->sched; 41 fence->sched = s_entity->sched;
40 spin_lock_init(&fence->lock); 42 spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
55 FENCE_TRACE(&fence->base, "was already signaled\n"); 57 FENCE_TRACE(&fence->base, "was already signaled\n");
56} 58}
57 59
60void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
61{
62 struct fence_cb *cur, *tmp;
63
64 set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
65 list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
66 list_del_init(&cur->node);
67 cur->func(&s_fence->base, cur);
68 }
69}
70
58static const char *amd_sched_fence_get_driver_name(struct fence *fence) 71static const char *amd_sched_fence_get_driver_name(struct fence *fence)
59{ 72{
60 return "amd_sched"; 73 return "amd_sched";
@@ -71,11 +84,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
71 return true; 84 return true;
72} 85}
73 86
87static void amd_sched_fence_release(struct fence *f)
88{
89 struct amd_sched_fence *fence = to_amd_sched_fence(f);
90 kmem_cache_free(sched_fence_slab, fence);
91}
92
74const struct fence_ops amd_sched_fence_ops = { 93const struct fence_ops amd_sched_fence_ops = {
75 .get_driver_name = amd_sched_fence_get_driver_name, 94 .get_driver_name = amd_sched_fence_get_driver_name,
76 .get_timeline_name = amd_sched_fence_get_timeline_name, 95 .get_timeline_name = amd_sched_fence_get_timeline_name,
77 .enable_signaling = amd_sched_fence_enable_signaling, 96 .enable_signaling = amd_sched_fence_enable_signaling,
78 .signaled = NULL, 97 .signaled = NULL,
79 .wait = fence_default_wait, 98 .wait = fence_default_wait,
80 .release = NULL, 99 .release = amd_sched_fence_release,
81}; 100};
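
The sched_fence.c hunks close the loop on the slab change: fences are now allocated with kmem_cache_zalloc(), so keeping .release as NULL (which, assuming the fence core's default, would kfree() the object) would free slab memory with the wrong allocator. Allocation and release must pair, roughly:

static struct amd_sched_fence *example_fence_alloc(void)
{
	/* zeroed object from the shared slab, not kzalloc() */
	return kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
}

static void example_fence_release(struct fence *f)
{
	/* must match the slab allocation above */
	kmem_cache_free(sched_fence_slab, to_amd_sched_fence(f));
}
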
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index cebcab560626..9bdc28cf927e 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1216,14 +1216,14 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1216 &armada_primary_plane_funcs, 1216 &armada_primary_plane_funcs,
1217 armada_primary_formats, 1217 armada_primary_formats,
1218 ARRAY_SIZE(armada_primary_formats), 1218 ARRAY_SIZE(armada_primary_formats),
1219 DRM_PLANE_TYPE_PRIMARY); 1219 DRM_PLANE_TYPE_PRIMARY, NULL);
1220 if (ret) { 1220 if (ret) {
1221 kfree(primary); 1221 kfree(primary);
1222 return ret; 1222 return ret;
1223 } 1223 }
1224 1224
1225 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, 1225 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
1226 &armada_crtc_funcs); 1226 &armada_crtc_funcs, NULL);
1227 if (ret) 1227 if (ret)
1228 goto err_crtc_init; 1228 goto err_crtc_init;
1229 1229
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 1c90969def3e..5fa4bf20b232 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -35,7 +35,7 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = {
35}; 35};
36 36
37struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, 37struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
38 struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) 38 const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
39{ 39{
40 struct armada_framebuffer *dfb; 40 struct armada_framebuffer *dfb;
41 uint8_t format, config; 41 uint8_t format, config;
@@ -101,7 +101,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
101} 101}
102 102
103static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, 103static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
104 struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode) 104 struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
105{ 105{
106 struct armada_gem_object *obj; 106 struct armada_gem_object *obj;
107 struct armada_framebuffer *dfb; 107 struct armada_framebuffer *dfb;
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index ce3f12ebfc53..48073c4f54d8 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -19,6 +19,6 @@ struct armada_framebuffer {
19#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj 19#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
20 20
21struct armada_framebuffer *armada_framebuffer_create(struct drm_device *, 21struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
22 struct drm_mode_fb_cmd2 *, struct armada_gem_object *); 22 const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
23 23
24#endif 24#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 5c22b380f8f3..148e8a42b2c6 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -460,7 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
460 &armada_ovl_plane_funcs, 460 &armada_ovl_plane_funcs,
461 armada_ovl_formats, 461 armada_ovl_formats,
462 ARRAY_SIZE(armada_ovl_formats), 462 ARRAY_SIZE(armada_ovl_formats),
463 DRM_PLANE_TYPE_OVERLAY); 463 DRM_PLANE_TYPE_OVERLAY, NULL);
464 if (ret) { 464 if (ret) {
465 kfree(dplane); 465 kfree(dplane);
466 return ret; 466 return ret;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 05f6522c0457..eb5715994ac2 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -256,7 +256,6 @@ struct ast_framebuffer {
256struct ast_fbdev { 256struct ast_fbdev {
257 struct drm_fb_helper helper; 257 struct drm_fb_helper helper;
258 struct ast_framebuffer afb; 258 struct ast_framebuffer afb;
259 struct list_head fbdev_list;
260 void *sysram; 259 void *sysram;
261 int size; 260 int size;
262 struct ttm_bo_kmap_obj mapping; 261 struct ttm_bo_kmap_obj mapping;
@@ -309,7 +308,7 @@ extern void ast_mode_fini(struct drm_device *dev);
309 308
310int ast_framebuffer_init(struct drm_device *dev, 309int ast_framebuffer_init(struct drm_device *dev,
311 struct ast_framebuffer *ast_fb, 310 struct ast_framebuffer *ast_fb,
312 struct drm_mode_fb_cmd2 *mode_cmd, 311 const struct drm_mode_fb_cmd2 *mode_cmd,
313 struct drm_gem_object *obj); 312 struct drm_gem_object *obj);
314 313
315int ast_fbdev_init(struct drm_device *dev); 314int ast_fbdev_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a37e7ea4a00c..5320f8c57884 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -163,7 +163,7 @@ static struct fb_ops astfb_ops = {
163}; 163};
164 164
165static int astfb_create_object(struct ast_fbdev *afbdev, 165static int astfb_create_object(struct ast_fbdev *afbdev,
166 struct drm_mode_fb_cmd2 *mode_cmd, 166 const struct drm_mode_fb_cmd2 *mode_cmd,
167 struct drm_gem_object **gobj_p) 167 struct drm_gem_object **gobj_p)
168{ 168{
169 struct drm_device *dev = afbdev->helper.dev; 169 struct drm_device *dev = afbdev->helper.dev;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 541a610667ad..9759009d1da3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -309,7 +309,7 @@ static const struct drm_framebuffer_funcs ast_fb_funcs = {
309 309
310int ast_framebuffer_init(struct drm_device *dev, 310int ast_framebuffer_init(struct drm_device *dev,
311 struct ast_framebuffer *ast_fb, 311 struct ast_framebuffer *ast_fb,
312 struct drm_mode_fb_cmd2 *mode_cmd, 312 const struct drm_mode_fb_cmd2 *mode_cmd,
313 struct drm_gem_object *obj) 313 struct drm_gem_object *obj)
314{ 314{
315 int ret; 315 int ret;
@@ -327,7 +327,7 @@ int ast_framebuffer_init(struct drm_device *dev,
327static struct drm_framebuffer * 327static struct drm_framebuffer *
328ast_user_framebuffer_create(struct drm_device *dev, 328ast_user_framebuffer_create(struct drm_device *dev,
329 struct drm_file *filp, 329 struct drm_file *filp,
330 struct drm_mode_fb_cmd2 *mode_cmd) 330 const struct drm_mode_fb_cmd2 *mode_cmd)
331{ 331{
332 struct drm_gem_object *obj; 332 struct drm_gem_object *obj;
333 struct ast_framebuffer *ast_fb; 333 struct ast_framebuffer *ast_fb;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 69d19f3304a5..0123458cbd83 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -751,7 +751,7 @@ static int ast_encoder_init(struct drm_device *dev)
751 return -ENOMEM; 751 return -ENOMEM;
752 752
753 drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs, 753 drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
754 DRM_MODE_ENCODER_DAC); 754 DRM_MODE_ENCODER_DAC, NULL);
755 drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs); 755 drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
756 756
757 ast_encoder->base.possible_crtcs = 1; 757 ast_encoder->base.possible_crtcs = 1;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9f6e234e7029..468a14f266a7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -344,7 +344,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
344 ret = drm_crtc_init_with_planes(dev, &crtc->base, 344 ret = drm_crtc_init_with_planes(dev, &crtc->base,
345 &planes->primary->base, 345 &planes->primary->base,
346 planes->cursor ? &planes->cursor->base : NULL, 346 planes->cursor ? &planes->cursor->base : NULL,
347 &atmel_hlcdc_crtc_funcs); 347 &atmel_hlcdc_crtc_funcs, NULL);
348 if (ret < 0) 348 if (ret < 0)
349 goto fail; 349 goto fail;
350 350
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 244df0a440b7..816895447155 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -402,7 +402,7 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
402} 402}
403 403
404static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev, 404static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
405 struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) 405 struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
406{ 406{
407 return drm_fb_cma_create(dev, file_priv, mode_cmd); 407 return drm_fb_cma_create(dev, file_priv, mode_cmd);
408} 408}
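
The recurring signature change in these driver hunks makes the drm_mode_fb_cmd2 argument const all the way through the .fb_create path, documenting that drivers must treat the userspace framebuffer description as read-only. For CMA-based drivers the hook stays a one-liner; a sketch mirroring the atmel hunk above (hypothetical function name):

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* width/height/pixel_format etc. are read-only here */
	return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
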
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 067e4c144bd6..d1129000c5cf 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -256,7 +256,7 @@ static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
256 &atmel_hlcdc_panel_encoder_helper_funcs); 256 &atmel_hlcdc_panel_encoder_helper_funcs);
257 ret = drm_encoder_init(dev, &panel->base.encoder, 257 ret = drm_encoder_init(dev, &panel->base.encoder,
258 &atmel_hlcdc_panel_encoder_funcs, 258 &atmel_hlcdc_panel_encoder_funcs,
259 DRM_MODE_ENCODER_LVDS); 259 DRM_MODE_ENCODER_LVDS, NULL);
260 if (ret) 260 if (ret)
261 return ret; 261 return ret;
262 262
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index d0299aed517e..1ffe9c329c46 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -941,7 +941,7 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
941 ret = drm_universal_plane_init(dev, &plane->base, 0, 941 ret = drm_universal_plane_init(dev, &plane->base, 0,
942 &layer_plane_funcs, 942 &layer_plane_funcs,
943 desc->formats->formats, 943 desc->formats->formats,
944 desc->formats->nformats, type); 944 desc->formats->nformats, type, NULL);
945 if (ret) 945 if (ret)
946 return ERR_PTR(ret); 946 return ERR_PTR(ret);
947 947
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 71f2687fc3cc..19b5adaebe24 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -149,7 +149,7 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
149 149
150int bochs_framebuffer_init(struct drm_device *dev, 150int bochs_framebuffer_init(struct drm_device *dev,
151 struct bochs_framebuffer *gfb, 151 struct bochs_framebuffer *gfb,
152 struct drm_mode_fb_cmd2 *mode_cmd, 152 const struct drm_mode_fb_cmd2 *mode_cmd,
153 struct drm_gem_object *obj); 153 struct drm_gem_object *obj);
154int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr); 154int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
155int bochs_bo_unpin(struct bochs_bo *bo); 155int bochs_bo_unpin(struct bochs_bo *bo);
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 09a0637aab3e..7520bf81fc25 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -34,7 +34,7 @@ static struct fb_ops bochsfb_ops = {
34}; 34};
35 35
36static int bochsfb_create_object(struct bochs_device *bochs, 36static int bochsfb_create_object(struct bochs_device *bochs,
37 struct drm_mode_fb_cmd2 *mode_cmd, 37 const struct drm_mode_fb_cmd2 *mode_cmd,
38 struct drm_gem_object **gobj_p) 38 struct drm_gem_object **gobj_p)
39{ 39{
40 struct drm_device *dev = bochs->dev; 40 struct drm_device *dev = bochs->dev;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 26bcd03a8cb6..a88be6dd34a4 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -196,7 +196,7 @@ static void bochs_encoder_init(struct drm_device *dev)
196 196
197 encoder->possible_crtcs = 0x1; 197 encoder->possible_crtcs = 0x1;
198 drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs, 198 drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
199 DRM_MODE_ENCODER_DAC); 199 DRM_MODE_ENCODER_DAC, NULL);
200 drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs); 200 drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
201} 201}
202 202
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index f69e6bf9bb0e..d812ad014da5 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -484,7 +484,7 @@ static const struct drm_framebuffer_funcs bochs_fb_funcs = {
484 484
485int bochs_framebuffer_init(struct drm_device *dev, 485int bochs_framebuffer_init(struct drm_device *dev,
486 struct bochs_framebuffer *gfb, 486 struct bochs_framebuffer *gfb,
487 struct drm_mode_fb_cmd2 *mode_cmd, 487 const struct drm_mode_fb_cmd2 *mode_cmd,
488 struct drm_gem_object *obj) 488 struct drm_gem_object *obj)
489{ 489{
490 int ret; 490 int ret;
@@ -502,7 +502,7 @@ int bochs_framebuffer_init(struct drm_device *dev,
502static struct drm_framebuffer * 502static struct drm_framebuffer *
503bochs_user_framebuffer_create(struct drm_device *dev, 503bochs_user_framebuffer_create(struct drm_device *dev,
504 struct drm_file *filp, 504 struct drm_file *filp,
505 struct drm_mode_fb_cmd2 *mode_cmd) 505 const struct drm_mode_fb_cmd2 *mode_cmd)
506{ 506{
507 struct drm_gem_object *obj; 507 struct drm_gem_object *obj;
508 struct bochs_framebuffer *bochs_fb; 508 struct bochs_framebuffer *bochs_fb;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 705061537a27..b774d637a00f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -153,7 +153,6 @@ struct cirrus_device {
153struct cirrus_fbdev { 153struct cirrus_fbdev {
154 struct drm_fb_helper helper; 154 struct drm_fb_helper helper;
155 struct cirrus_framebuffer gfb; 155 struct cirrus_framebuffer gfb;
156 struct list_head fbdev_list;
157 void *sysram; 156 void *sysram;
158 int size; 157 int size;
159 int x1, y1, x2, y2; /* dirty rect */ 158 int x1, y1, x2, y2; /* dirty rect */
@@ -207,7 +206,7 @@ int cirrus_dumb_create(struct drm_file *file,
207 206
208int cirrus_framebuffer_init(struct drm_device *dev, 207int cirrus_framebuffer_init(struct drm_device *dev,
209 struct cirrus_framebuffer *gfb, 208 struct cirrus_framebuffer *gfb,
210 struct drm_mode_fb_cmd2 *mode_cmd, 209 const struct drm_mode_fb_cmd2 *mode_cmd,
211 struct drm_gem_object *obj); 210 struct drm_gem_object *obj);
212 211
213bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, 212bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 589103bcc06c..3b5be7272357 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -135,7 +135,7 @@ static struct fb_ops cirrusfb_ops = {
135}; 135};
136 136
137static int cirrusfb_create_object(struct cirrus_fbdev *afbdev, 137static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
138 struct drm_mode_fb_cmd2 *mode_cmd, 138 const struct drm_mode_fb_cmd2 *mode_cmd,
139 struct drm_gem_object **gobj_p) 139 struct drm_gem_object **gobj_p)
140{ 140{
141 struct drm_device *dev = afbdev->helper.dev; 141 struct drm_device *dev = afbdev->helper.dev;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 055fd86ba717..0907715e90fd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -29,7 +29,7 @@ static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
 
 int cirrus_framebuffer_init(struct drm_device *dev,
			    struct cirrus_framebuffer *gfb,
-			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    const struct drm_mode_fb_cmd2 *mode_cmd,
			    struct drm_gem_object *obj)
 {
	int ret;
@@ -47,7 +47,7 @@ int cirrus_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 cirrus_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *filp,
-			       struct drm_mode_fb_cmd2 *mode_cmd)
+			       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
	struct cirrus_device *cdev = dev->dev_private;
	struct drm_gem_object *obj;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 61385f2298bf..276719e52153 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -489,7 +489,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
	encoder->possible_crtcs = 0x1;
 
	drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
-			 DRM_MODE_ENCODER_DAC);
+			 DRM_MODE_ENCODER_DAC, NULL);
	drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
 
	return encoder;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7bb3845d9974..6a21e5c378c1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -288,8 +288,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
	state->crtcs[index] = crtc;
	crtc_state->state = state;
 
-	DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
-			 crtc->base.id, crtc_state, state);
+	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
+			 crtc->base.id, crtc->name, crtc_state, state);
 
	return crtc_state;
 }
@@ -316,8 +316,7 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;
 
-	if (state->mode_blob)
-		drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;
 
	if (mode) {
@@ -363,8 +362,7 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
	if (blob == state->mode_blob)
		return 0;
 
-	if (state->mode_blob)
-		drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;
 
	if (blob) {
@@ -419,8 +417,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
-		if (mode)
-			drm_property_unreference_blob(mode);
+		drm_property_unreference_blob(mode);
		return ret;
	}
	else if (crtc->funcs->atomic_set_property)
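These three hunks rely on drm_property_unreference_blob() being NULL-safe, which is what makes the caller-side guards redundant. A minimal sketch of the assumed callee-side pattern (simplified; the real function also handles reference counting and locking):

	void drm_property_unreference_blob(struct drm_property_blob *blob)
	{
		/* Tolerating NULL here lets every caller drop its own check. */
		if (!blob)
			return;

		/* ... drop the reference and free on the last put ... */
	}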
@@ -432,11 +429,20 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 }
 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
 
-/*
+/**
+ * drm_atomic_crtc_get_property - get property value from CRTC state
+ * @crtc: the drm CRTC to get a property from
+ * @state: the state object to get the property value from
+ * @property: the property to get
+ * @val: return location for the property value
+ *
  * This function handles generic/core properties and calls out to
  * driver's ->atomic_get_property() for driver properties. To ensure
  * consistent behavior you must call this function rather than the
  * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
  */
 static int
 drm_atomic_crtc_get_property(struct drm_crtc *crtc,
@@ -480,8 +486,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
	 */
 
	if (state->active && !state->enable) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
+				 crtc->base.id, crtc->name);
		return -EINVAL;
	}
 
@@ -490,15 +496,15 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
+				 crtc->base.id, crtc->name);
		return -EINVAL;
	}
 
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
+				 crtc->base.id, crtc->name);
		return -EINVAL;
	}
 
@@ -543,8 +549,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
	state->planes[index] = plane;
	plane_state->state = state;
 
-	DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
-			 plane->base.id, plane_state, state);
+	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
+			 plane->base.id, plane->name, plane_state, state);
 
	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;
@@ -619,11 +625,20 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
 }
 EXPORT_SYMBOL(drm_atomic_plane_set_property);
 
-/*
+/**
+ * drm_atomic_plane_get_property - get property value from plane state
+ * @plane: the drm plane to get a property from
+ * @state: the state object to get the property value from
+ * @property: the property to get
+ * @val: return location for the property value
+ *
  * This function handles generic/core properties and calls out to
  * driver's ->atomic_get_property() for driver properties. To ensure
  * consistent behavior you must call this function rather than the
  * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
  */
 static int
 drm_atomic_plane_get_property(struct drm_plane *plane,
@@ -755,8 +770,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
	}
 
	if (plane_switching_crtc(state->state, plane, state)) {
-		DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
-				 plane->base.id);
+		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
+				 plane->base.id, plane->name);
		return -EINVAL;
	}
 
@@ -875,11 +890,20 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_atomic_connector_set_property);
 
-/*
+/**
+ * drm_atomic_connector_get_property - get property value from connector state
+ * @connector: the drm connector to get a property from
+ * @state: the state object to get the property value from
+ * @property: the property to get
+ * @val: return location for the property value
+ *
  * This function handles generic/core properties and calls out to
  * driver's ->atomic_get_property() for driver properties. To ensure
  * consistent behavior you must call this function rather than the
  * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
  */
 static int
 drm_atomic_connector_get_property(struct drm_connector *connector,
@@ -980,8 +1004,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
	}
 
	if (crtc)
-		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
-				 plane_state, crtc->base.id);
+		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
+				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);
@@ -1048,8 +1072,8 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
	conn_state->crtc = crtc;
 
	if (crtc)
-		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
-				 conn_state, crtc->base.id);
+		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+				 conn_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
@@ -1088,8 +1112,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
	if (ret)
		return ret;
 
-	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
-			 crtc->base.id, state);
+	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
+			 crtc->base.id, crtc->name, state);
 
	/*
	 * Changed connectors are already in @state, so only need to look at the
@@ -1169,8 +1193,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
			num_connected_connectors++;
	}
 
-	DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
-			 state, num_connected_connectors, crtc->base.id);
+	DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d:%s]\n",
+			 state, num_connected_connectors,
+			 crtc->base.id, crtc->name);
 
	return num_connected_connectors;
 }
@@ -1191,12 +1216,7 @@ void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
 retry:
	drm_modeset_backoff(state->acquire_ctx);
 
-	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
-			       state->acquire_ctx);
-	if (ret)
-		goto retry;
-	ret = drm_modeset_lock_all_crtcs(state->dev,
-					 state->acquire_ctx);
+	ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
	if (ret)
		goto retry;
 }
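drm_modeset_lock_all_ctx() grabs connection_mutex and every CRTC lock in one call, so the old two-step sequence collapses into a single retryable operation. A minimal sketch of the acquire/backoff loop the function is designed for (error handling beyond -EDEADLK trimmed):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... inspect or modify modeset state under the locks ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);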
@@ -1228,8 +1248,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
	for_each_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
-			DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
-					 plane->base.id);
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
+					 plane->base.id, plane->name);
			return ret;
		}
	}
@@ -1237,8 +1257,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
+					 crtc->base.id, crtc->name);
			return ret;
		}
	}
@@ -1249,8 +1269,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
	if (!state->allow_modeset) {
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-				DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
-						 crtc->base.id);
+				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
+						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
@@ -1432,6 +1452,45 @@ static int atomic_set_prop(struct drm_atomic_state *state,
	return ret;
 }
 
+/**
+ * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
+ *
+ * @dev: drm device to check.
+ * @plane_mask: plane mask for planes that were updated.
+ * @ret: return value, can be -EDEADLK for a retry.
+ *
+ * Before doing an update plane->old_fb is set to plane->fb,
+ * but before dropping the locks old_fb needs to be set to NULL
+ * and plane->fb updated. This is a common operation for each
+ * atomic update, so this call is split off as a helper.
+ */
+void drm_atomic_clean_old_fb(struct drm_device *dev,
+			     unsigned plane_mask,
+			     int ret)
+{
+	struct drm_plane *plane;
+
+	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+	 * locks (ie. while it is still safe to deref plane->state). We
+	 * need to do this here because the driver entry points cannot
+	 * distinguish between legacy and atomic ioctls.
+	 */
+	drm_for_each_plane_mask(plane, dev, plane_mask) {
+		if (ret == 0) {
+			struct drm_framebuffer *new_fb = plane->state->fb;
+			if (new_fb)
+				drm_framebuffer_reference(new_fb);
+			plane->fb = new_fb;
+			plane->crtc = plane->state->crtc;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
+		}
+		plane->old_fb = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
 {
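Legacy entry points built on top of atomic can reuse the split-out helper the same way the ioctl does below: accumulate a mask of touched planes, commit, then fix up the legacy plane->fb/crtc pointers while plane->state is still safe to dereference. A hedged sketch with the state construction elided:

	unsigned plane_mask = 0;
	int ret;

	/* For each plane added to the atomic state:
	 *   plane->old_fb = plane->fb;
	 *   plane_mask |= 1 << drm_plane_index(plane);
	 */

	ret = drm_atomic_commit(state);

	/* Runs for success and failure alike; on failure it only clears
	 * the old_fb pointers again.
	 */
	drm_atomic_clean_old_fb(dev, plane_mask, ret);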
@@ -1446,7 +1505,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
-	unsigned plane_mask = 0;
+	unsigned plane_mask;
	int ret = 0;
	unsigned int i, j;
 
@@ -1486,6 +1545,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
+	plane_mask = 0;
	copied_objs = 0;
	copied_props = 0;
 
@@ -1576,24 +1636,7 @@ retry:
	}
 
 out:
-	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-	 * locks (ie. while it is still safe to deref plane->state). We
-	 * need to do this here because the driver entry points cannot
-	 * distinguish between legacy and atomic ioctls.
-	 */
-	drm_for_each_plane_mask(plane, dev, plane_mask) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
		/*
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0c6f62168776..63f925b75357 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -52,6 +52,12 @@
  * drm_atomic_helper_update_plane(), drm_atomic_helper_disable_plane() and the
  * various functions to implement set_property callbacks. New drivers must not
  * implement these functions themselves but must use the provided helpers.
+ *
+ * The atomic helper uses the same function table structures as all other
+ * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs,
+ * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It
+ * also shares the struct &drm_plane_helper_funcs function table with the plane
+ * helpers.
  */
 static void
 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
@@ -80,6 +86,27 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
	}
 }
 
+static bool
+check_pending_encoder_assignment(struct drm_atomic_state *state,
+				 struct drm_encoder *new_encoder,
+				 struct drm_connector *new_connector)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, conn_state, i) {
+		if (conn_state->best_encoder != new_encoder)
+			continue;
+
+		/* encoder already assigned and we're trying to re-steal it! */
+		if (connector->state->best_encoder != conn_state->best_encoder)
+			return false;
+	}
+
+	return true;
+}
+
 static struct drm_crtc *
 get_current_crtc_for_encoder(struct drm_device *dev,
			     struct drm_encoder *encoder)
@@ -116,9 +143,9 @@ steal_encoder(struct drm_atomic_state *state,
	 */
	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+	DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
			 encoder->base.id, encoder->name,
-			 encoder_crtc->base.id);
+			 encoder_crtc->base.id, encoder_crtc->name);
 
	crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
	if (IS_ERR(crtc_state))
@@ -210,17 +237,33 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
		return -EINVAL;
	}
 
+	if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
+				 new_encoder->base.id,
+				 new_encoder->name,
+				 connector_state->crtc->base.id);
+		return -EINVAL;
+	}
+
	if (new_encoder == connector_state->best_encoder) {
-		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
				 connector->base.id,
				 connector->name,
				 new_encoder->base.id,
				 new_encoder->name,
-				 connector_state->crtc->base.id);
+				 connector_state->crtc->base.id,
+				 connector_state->crtc->name);
 
		return 0;
	}
 
+	if (!check_pending_encoder_assignment(state, new_encoder, connector)) {
+		DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
+				 connector->base.id,
+				 connector->name);
+		return -EINVAL;
+	}
+
	encoder_crtc = get_current_crtc_for_encoder(state->dev,
						    new_encoder);
 
@@ -243,12 +286,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
	crtc_state = state->crtc_states[idx];
	crtc_state->connectors_changed = true;
 
-	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 new_encoder->base.id,
			 new_encoder->name,
-			 connector_state->crtc->base.id);
+			 connector_state->crtc->base.id,
+			 connector_state->crtc->name);
 
	return 0;
 }
@@ -332,8 +376,8 @@ mode_fixup(struct drm_atomic_state *state)
		ret = funcs->mode_fixup(crtc, &crtc_state->mode,
					&crtc_state->adjusted_mode);
		if (!ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
+					 crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}
@@ -380,14 +424,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
+					 crtc->base.id, crtc->name);
			crtc_state->mode_changed = true;
		}
 
		if (crtc->state->enable != crtc_state->enable) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
+					 crtc->base.id, crtc->name);
 
			/*
			 * For clarity this assignment is done here, but
@@ -428,18 +472,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
		 * a full modeset because update_connector_routing forces that.
		 */
		if (crtc->state->active != crtc_state->active) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
+					 crtc->base.id, crtc->name);
			crtc_state->active_changed = true;
		}
 
		if (!drm_atomic_crtc_needs_modeset(crtc_state))
			continue;
 
-		DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
-				 crtc->base.id,
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
+				 crtc->base.id, crtc->name,
				 crtc_state->enable ? 'y' : 'n',
				 crtc_state->active ? 'y' : 'n');
 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret != 0)
@@ -453,8 +497,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
						crtc);
 
		if (crtc_state->enable != !!num_connectors) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
+					 crtc->base.id, crtc->name);
 
			return -EINVAL;
		}
@@ -501,8 +545,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 
		ret = funcs->atomic_check(plane, plane_state);
		if (ret) {
-			DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
-					 plane->base.id);
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+					 plane->base.id, plane->name);
			return ret;
		}
	}
@@ -517,8 +561,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 
		ret = funcs->atomic_check(crtc, state->crtc_states[i]);
		if (ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+					 crtc->base.id, crtc->name);
			return ret;
		}
	}
@@ -631,8 +675,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
		funcs = crtc->helper_private;
 
-		DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
-				 crtc->base.id);
+		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
+				 crtc->base.id, crtc->name);
 
 
		/* Right function depends upon target state. */
@@ -743,8 +787,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
		funcs = crtc->helper_private;
 
		if (crtc->state->enable && funcs->mode_set_nofb) {
-			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
+					 crtc->base.id, crtc->name);
 
			funcs->mode_set_nofb(crtc);
		}
@@ -843,8 +887,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
		funcs = crtc->helper_private;
 
		if (crtc->state->enable) {
-			DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
-					 crtc->base.id);
+			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
+					 crtc->base.id, crtc->name);
 
			if (funcs->enable)
				funcs->enable(crtc);
@@ -1334,6 +1378,49 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
 /**
+ * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
+ * @crtc: CRTC
+ * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
+ *
+ * Disables all planes associated with the given CRTC. This can be
+ * used for instance in the CRTC helper disable callback to disable
+ * all planes before shutting down the display pipeline.
+ *
+ * If the atomic-parameter is set the function calls the CRTC's
+ * atomic_begin hook before and atomic_flush hook after disabling the
+ * planes.
+ *
+ * It is a bug to call this function without having implemented the
+ * ->atomic_disable() plane hook.
+ */
+void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
+					      bool atomic)
+{
+	const struct drm_crtc_helper_funcs *crtc_funcs =
+		crtc->helper_private;
+	struct drm_plane *plane;
+
+	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
+		crtc_funcs->atomic_begin(crtc, NULL);
+
+	drm_for_each_plane(plane, crtc->dev) {
+		const struct drm_plane_helper_funcs *plane_funcs =
+			plane->helper_private;
+
+		if (plane->state->crtc != crtc || !plane_funcs)
+			continue;
+
+		WARN_ON(!plane_funcs->atomic_disable);
+		if (plane_funcs->atomic_disable)
+			plane_funcs->atomic_disable(plane, NULL);
+	}
+
+	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
+		crtc_funcs->atomic_flush(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
+
+/**
  * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
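A driver's CRTC disable path might use the new helper as sketched below; foo_crtc_disable is a placeholder name, and all planes on the CRTC are assumed to implement the ->atomic_disable() plane hook, as the kernel-doc above requires:

	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		/* Shut down every plane still scanning out on this CRTC.
		 * Passing true instead would bracket the plane disables
		 * with the CRTC's atomic_begin()/atomic_flush() hooks.
		 */
		drm_atomic_helper_disable_planes_on_crtc(crtc, false);

		/* ... then turn off the CRTC hardware itself ... */
	}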
@@ -1477,12 +1564,12 @@ retry:
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;
-	plane_state->crtc_h = crtc_h;
	plane_state->crtc_w = crtc_w;
+	plane_state->crtc_h = crtc_h;
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;
-	plane_state->src_h = src_h;
	plane_state->src_w = src_w;
+	plane_state->src_h = src_h;
 
	if (plane == crtc->cursor)
		state->legacy_cursor_update = true;
@@ -1553,6 +1640,9 @@ retry:
		goto fail;
	}
 
+	if (plane_state->crtc && (plane == plane->crtc->cursor))
+		plane_state->state->legacy_cursor_update = true;
+
	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
	if (ret != 0)
		goto fail;
@@ -1598,15 +1688,12 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
	drm_atomic_set_fb_for_plane(plane_state, NULL);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
-	plane_state->crtc_h = 0;
	plane_state->crtc_w = 0;
+	plane_state->crtc_h = 0;
	plane_state->src_x = 0;
	plane_state->src_y = 0;
-	plane_state->src_h = 0;
	plane_state->src_w = 0;
-
-	if (plane->crtc && (plane == plane->crtc->cursor))
-		plane_state->state->legacy_cursor_update = true;
+	plane_state->src_h = 0;
 
	return 0;
@@ -1741,6 +1828,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *primary_state;
	struct drm_crtc *crtc = set->crtc;
+	int hdisplay, vdisplay;
	int ret;
 
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1783,19 +1871,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
	if (ret != 0)
		return ret;
 
+	drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+
	drm_atomic_set_fb_for_plane(primary_state, set->fb);
	primary_state->crtc_x = 0;
	primary_state->crtc_y = 0;
-	primary_state->crtc_h = set->mode->vdisplay;
-	primary_state->crtc_w = set->mode->hdisplay;
+	primary_state->crtc_w = hdisplay;
+	primary_state->crtc_h = vdisplay;
	primary_state->src_x = set->x << 16;
	primary_state->src_y = set->y << 16;
	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
-		primary_state->src_h = set->mode->hdisplay << 16;
-		primary_state->src_w = set->mode->vdisplay << 16;
+		primary_state->src_w = vdisplay << 16;
+		primary_state->src_h = hdisplay << 16;
	} else {
-		primary_state->src_h = set->mode->vdisplay << 16;
-		primary_state->src_w = set->mode->hdisplay << 16;
+		primary_state->src_w = hdisplay << 16;
+		primary_state->src_h = vdisplay << 16;
	}
 
 commit:
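drm_crtc_get_hv_timing() yields the hdisplay/vdisplay the CRTC actually scans out, which matches mode->hdisplay/vdisplay for ordinary progressive modes but differs for e.g. frame-packed stereo modes; sizing the primary plane from it keeps the plane consistent with the CRTC viewport. Sketch of the call (mode being any struct drm_display_mode):

	int hdisplay, vdisplay;

	/* hdisplay/vdisplay come back with any stereo doubling that the
	 * core applies already folded in.
	 */
	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);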
@@ -1807,6 +1897,161 @@ commit:
 }
 
 /**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector *conn;
+	int err;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+
+	drm_for_each_connector(conn, dev) {
+		struct drm_crtc *crtc = conn->state->crtc;
+		struct drm_crtc_state *crtc_state;
+
+		if (!crtc || conn->dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			err = PTR_ERR(crtc_state);
+			goto free;
+		}
+
+		crtc_state->active = false;
+	}
+
+	err = drm_atomic_commit(state);
+
+free:
+	if (err < 0)
+		drm_atomic_state_free(state);
+
+	return err;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_all);
+
+/**
+ * drm_atomic_helper_suspend - subsystem-level suspend helper
+ * @dev: DRM device
+ *
+ * Duplicates the current atomic state, disables all active outputs and then
+ * returns a pointer to the original atomic state to the caller. Drivers can
+ * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
+ * restore the output configuration that was active at the time the system
+ * entered suspend.
+ *
+ * Note that it is potentially unsafe to use this. The atomic state object
+ * returned by this function is assumed to be persistent. Drivers must ensure
+ * that this holds true. Before calling this function, drivers must make sure
+ * to suspend fbdev emulation so that nothing can be using the device.
+ *
+ * Returns:
+ * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
+ * encoded error code on failure. Drivers should store the returned atomic
+ * state object and pass it to the drm_atomic_helper_resume() helper upon
+ * resume.
+ *
+ * See also:
+ * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
+ * drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
+	int err;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	err = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (err < 0) {
+		state = ERR_PTR(err);
+		goto unlock;
+	}
+
+	state = drm_atomic_helper_duplicate_state(dev, &ctx);
+	if (IS_ERR(state))
+		goto unlock;
+
+	err = drm_atomic_helper_disable_all(dev, &ctx);
+	if (err < 0) {
+		drm_atomic_state_free(state);
+		state = ERR_PTR(err);
+		goto unlock;
+	}
+
+unlock:
+	if (PTR_ERR(state) == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_suspend);
+
+/**
+ * drm_atomic_helper_resume - subsystem-level resume helper
+ * @dev: DRM device
+ * @state: atomic state to resume to
+ *
+ * Calls drm_mode_config_reset() to synchronize hardware and software states,
+ * grabs all modeset locks and commits the atomic state object. This can be
+ * used in conjunction with the drm_atomic_helper_suspend() helper to
+ * implement suspend/resume for drivers that support atomic mode-setting.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend()
+ */
+int drm_atomic_helper_resume(struct drm_device *dev,
+			     struct drm_atomic_state *state)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	int err;
+
+	drm_mode_config_reset(dev);
+	drm_modeset_lock_all(dev);
+	state->acquire_ctx = config->acquire_ctx;
+	err = drm_atomic_commit(state);
+	drm_modeset_unlock_all(dev);
+
+	return err;
+}
+EXPORT_SYMBOL(drm_atomic_helper_resume);
+
+/**
  * drm_atomic_helper_crtc_set_property - helper for crtc properties
  * @crtc: DRM crtc
  * @property: DRM property
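Wired into dev_pm_ops, the pair could look roughly like this; the foo_* names and the drvdata layout are illustrative, not taken from any driver:

	static int foo_pm_suspend(struct device *dev)
	{
		struct foo_device *foo = dev_get_drvdata(dev);

		/* The returned state must be kept around until resume. */
		foo->suspend_state = drm_atomic_helper_suspend(foo->drm);
		if (IS_ERR(foo->suspend_state))
			return PTR_ERR(foo->suspend_state);

		return 0;
	}

	static int foo_pm_resume(struct device *dev)
	{
		struct foo_device *foo = dev_get_drvdata(dev);

		/* Consumes the state saved at suspend time. */
		return drm_atomic_helper_resume(foo->drm, foo->suspend_state);
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);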
@@ -2162,6 +2407,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
  * The simpler solution is to just reset the software state to everything off,
  * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
  * the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
  */
 
@@ -2173,7 +2424,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
  */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 {
-	if (crtc->state && crtc->state->mode_blob)
+	if (crtc->state)
		drm_property_unreference_blob(crtc->state->mode_blob);
	kfree(crtc->state);
	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
@@ -2241,8 +2492,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
					    struct drm_crtc_state *state)
 {
-	if (state->mode_blob)
-		drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->mode_blob);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
 
2248 2498
@@ -2419,7 +2669,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
2419 * @ctx: lock acquisition context 2669 * @ctx: lock acquisition context
2420 * 2670 *
2421 * Makes a copy of the current atomic state by looping over all objects and 2671 * Makes a copy of the current atomic state by looping over all objects and
2422 * duplicating their respective states. 2672 * duplicating their respective states. This is used for example by suspend/
2673 * resume support code to save the state prior to suspend such that it can
2674 * be restored upon resume.
2423 * 2675 *
2424 * Note that this treats atomic state as persistent between save and restore. 2676 * Note that this treats atomic state as persistent between save and restore.
2425 * Drivers must make sure that this is possible and won't result in confusion 2677 * Drivers must make sure that this is possible and won't result in confusion
@@ -2431,6 +2683,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
  * Returns:
  * A pointer to the copy of the atomic state object on success or an
  * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
  */
 struct drm_atomic_state *
 drm_atomic_helper_duplicate_state(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 6b8f7211e543..bd93453afa61 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -31,14 +31,14 @@
 /**
  * DOC: overview
  *
- * drm_bridge represents a device that hangs on to an encoder. These are handy
- * when a regular drm_encoder entity isn't enough to represent the entire
- * encoder chain.
+ * struct &drm_bridge represents a device that hangs on to an encoder. These are
+ * handy when a regular &drm_encoder entity isn't enough to represent the entire
+ * encoder chain.
  *
- * A bridge is always associated to a single drm_encoder at a time, but can be
+ * A bridge is always attached to a single &drm_encoder at a time, but can be
  * either connected to it directly, or through an intermediate bridge:
  *
  *	encoder ---> bridge B ---> bridge A
  *
  * Here, the output of the encoder feeds to bridge B, and that further feeds to
  * bridge A.
@@ -46,11 +46,16 @@
  * The driver using the bridge is responsible for making the associations between
  * the encoder and bridges. Once these links are made, the bridges will
  * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in drm_bridge_funcs.
+ * through the ops provided in &drm_bridge_funcs.
  *
  * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
- * crtcs, encoders or connectors. They just provide additional hooks to get the
- * desired output at the end of the encoder chain.
+ * CRTCs, encoders or connectors and hence are not visible to userspace. They
+ * just provide additional hooks to get the desired output at the end of the
+ * encoder chain.
+ *
+ * Bridges can also be chained up using the next pointer in struct &drm_bridge.
+ *
+ * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
  */
 
 static DEFINE_MUTEX(bridge_lock);
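Making those associations is a few pointer assignments plus a registration call; a hedged sketch for a single bridge (variable names are placeholders, and a second bridge would be linked through the first one's ->next pointer):

	int ret;

	/* Register the bridge with the DRM core. */
	ret = drm_bridge_attach(dev, bridge);
	if (ret)
		return ret;

	/* Hook the bridge up behind the encoder. */
	bridge->encoder = encoder;
	encoder->bridge = bridge;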
@@ -122,34 +127,12 @@ EXPORT_SYMBOL(drm_bridge_attach);
 /**
  * DOC: bridge callbacks
  *
- * The drm_bridge_funcs ops are populated by the bridge driver. The drm
- * internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific drm_bridge_funcs op for all the bridges
+ * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
+ * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
+ * These helpers call a specific &drm_bridge_funcs op for all the bridges
  * during encoder configuration.
  *
- * When creating a bridge driver, one can implement drm_bridge_funcs op with
- * the help of these rough rules:
- *
- * pre_enable: this contains things needed to be done for the bridge before
- * its clock and timings are enabled by its source. For a bridge, its source
- * is generally the encoder or bridge just before it in the encoder chain.
- *
- * enable: this contains things needed to be done for the bridge once its
- * source is enabled. In other words, enable is called once the source is
- * ready with clock and timing needed by the bridge.
- *
- * disable: this contains things needed to be done for the bridge assuming
- * that its source is still enabled, i.e. clock and timings are still on.
- *
- * post_disable: this contains things needed to be done for the bridge once
- * its source is disabled, i.e. once clocks and timings are off.
- *
- * mode_fixup: this should fixup the given mode for the bridge. It is called
- * after the encoder's mode fixup. mode_fixup can also reject a mode completely
- * if it's unsuitable for the hardware.
- *
- * mode_set: this sets up the mode for the bridge. It assumes that its source
- * (an encoder or a bridge) has set the mode too.
+ * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
  */
 
 /**
@@ -159,7 +142,7 @@ EXPORT_SYMBOL(drm_bridge_attach);
  * @mode: desired mode to be set for the bridge
  * @adjusted_mode: updated mode that works for this bridge
  *
- * Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the
+ * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the
  * encoder chain, starting from the first bridge to the last.
  *
  * Note: the bridge passed should be the one closest to the encoder
@@ -186,11 +169,11 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
 EXPORT_SYMBOL(drm_bridge_mode_fixup);
 
 /**
- * drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all
+ * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all
  *			bridges in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder
  * chain, starting from the last bridge to the first. These are called before
  * calling the encoder's prepare op.
  *
@@ -208,11 +191,11 @@ void drm_bridge_disable(struct drm_bridge *bridge)
 EXPORT_SYMBOL(drm_bridge_disable);
 
 /**
- * drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for
+ * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for
  *			     all bridges in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'post_disable' drm_bridge_funcs op for all the bridges in the
+ * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the
  * encoder chain, starting from the first bridge to the last. These are called
  * after completing the encoder's prepare op.
  *
@@ -236,7 +219,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
  * @mode: desired mode to be set for the bridge
  * @adjusted_mode: updated mode that works for this bridge
  *
- * Calls 'mode_set' drm_bridge_funcs op for all the bridges in the
+ * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the
  * encoder chain, starting from the first bridge to the last.
  *
  * Note: the bridge passed should be the one closest to the encoder
@@ -256,11 +239,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
 EXPORT_SYMBOL(drm_bridge_mode_set);
 
 /**
- * drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all
+ * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all
  *			   bridges in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder
  * chain, starting from the last bridge to the first. These are called
  * before calling the encoder's commit op.
  *
@@ -278,11 +261,11 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
 EXPORT_SYMBOL(drm_bridge_pre_enable);
 
 /**
- * drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges
+ * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges
  *		       in the encoder chain.
  * @bridge: bridge control structure
  *
- * Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder
  * chain, starting from the first bridge to the last. These are called
  * after completing the encoder's commit op.
  *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 24c5434abd1c..62fa95fa5471 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -45,7 +45,7 @@
 
 static struct drm_framebuffer *
 internal_framebuffer_create(struct drm_device *dev,
-			    struct drm_mode_fb_cmd2 *r,
+			    const struct drm_mode_fb_cmd2 *r,
			    struct drm_file *file_priv);
 
 /* Avoid boilerplate. I'm tired of typing. */
@@ -649,6 +649,18 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
 
 DEFINE_WW_CLASS(crtc_ww_class);
 
+static unsigned int drm_num_crtcs(struct drm_device *dev)
+{
+	unsigned int num = 0;
+	struct drm_crtc *tmp;
+
+	drm_for_each_crtc(tmp, dev) {
+		num++;
+	}
+
+	return num;
+}
+
 /**
  * drm_crtc_init_with_planes - Initialise a new CRTC object with
  *    specified primary and cursor planes.
@@ -657,6 +669,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
  * @primary: Primary plane for CRTC
  * @cursor: Cursor plane for CRTC
  * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
  *
  * Inits a new object created as base part of a driver crtc object.
  *
@@ -666,7 +679,8 @@ DEFINE_WW_CLASS(crtc_ww_class);
 int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
			      struct drm_plane *primary,
			      struct drm_plane *cursor,
-			      const struct drm_crtc_funcs *funcs)
+			      const struct drm_crtc_funcs *funcs,
+			      const char *name, ...)
 {
	struct drm_mode_config *config = &dev->mode_config;
	int ret;
@@ -682,6 +696,21 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
	if (ret)
		return ret;
 
+	if (name) {
+		va_list ap;
+
+		va_start(ap, name);
+		crtc->name = kvasprintf(GFP_KERNEL, name, ap);
+		va_end(ap);
+	} else {
+		crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
+				       drm_num_crtcs(dev));
+	}
+	if (!crtc->name) {
+		drm_mode_object_put(dev, &crtc->base);
+		return -ENOMEM;
+	}
+
	crtc->base.properties = &crtc->properties;
 
	list_add_tail(&crtc->head, &config->crtc_list);
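Drivers can now label objects meaningfully at init time; with a NULL name the core falls back to an index-based default such as "crtc-%d". A hypothetical call (the pipe-based format string is illustrative; drm_encoder_init() and drm_universal_plane_init() take the same trailing name arguments):

	ret = drm_crtc_init_with_planes(dev, &foo->crtc, primary, cursor,
					&foo_crtc_funcs, "pipe-%d", pipe);
	if (ret)
		return ret;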
@@ -728,6 +757,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
	if (crtc->state && crtc->funcs->atomic_destroy_state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 
+	kfree(crtc->name);
+
	memset(crtc, 0, sizeof(*crtc));
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
@@ -1075,6 +1106,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
  * @encoder: the encoder to init
  * @funcs: callbacks for this encoder
  * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
  *
  * Initialises a preallocated encoder. Encoder should be
  * subclassed as part of driver encoder objects.
@@ -1085,7 +1117,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
1085int drm_encoder_init(struct drm_device *dev, 1117int drm_encoder_init(struct drm_device *dev,
1086 struct drm_encoder *encoder, 1118 struct drm_encoder *encoder,
1087 const struct drm_encoder_funcs *funcs, 1119 const struct drm_encoder_funcs *funcs,
1088 int encoder_type) 1120 int encoder_type, const char *name, ...)
1089{ 1121{
1090 int ret; 1122 int ret;
1091 1123
@@ -1098,9 +1130,17 @@ int drm_encoder_init(struct drm_device *dev,
1098 encoder->dev = dev; 1130 encoder->dev = dev;
1099 encoder->encoder_type = encoder_type; 1131 encoder->encoder_type = encoder_type;
1100 encoder->funcs = funcs; 1132 encoder->funcs = funcs;
1101 encoder->name = kasprintf(GFP_KERNEL, "%s-%d", 1133 if (name) {
1102 drm_encoder_enum_list[encoder_type].name, 1134 va_list ap;
1103 encoder->base.id); 1135
1136 va_start(ap, name);
1137 encoder->name = kvasprintf(GFP_KERNEL, name, ap);
1138 va_end(ap);
1139 } else {
1140 encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
1141 drm_encoder_enum_list[encoder_type].name,
1142 encoder->base.id);
1143 }
1104 if (!encoder->name) { 1144 if (!encoder->name) {
1105 ret = -ENOMEM; 1145 ret = -ENOMEM;
1106 goto out_put; 1146 goto out_put;
@@ -1141,6 +1181,18 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1141} 1181}
1142EXPORT_SYMBOL(drm_encoder_cleanup); 1182EXPORT_SYMBOL(drm_encoder_cleanup);
1143 1183
1184static unsigned int drm_num_planes(struct drm_device *dev)
1185{
1186 unsigned int num = 0;
1187 struct drm_plane *tmp;
1188
1189 drm_for_each_plane(tmp, dev) {
1190 num++;
1191 }
1192
1193 return num;
1194}
1195
1144/** 1196/**
1145 * drm_universal_plane_init - Initialize a new universal plane object 1197 * drm_universal_plane_init - Initialize a new universal plane object
1146 * @dev: DRM device 1198 * @dev: DRM device
@@ -1150,6 +1202,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
1150 * @formats: array of supported formats (%DRM_FORMAT_*) 1202 * @formats: array of supported formats (%DRM_FORMAT_*)
1151 * @format_count: number of elements in @formats 1203 * @format_count: number of elements in @formats
1152 * @type: type of plane (overlay, primary, cursor) 1204 * @type: type of plane (overlay, primary, cursor)
1205 * @name: printf style format string for the plane name, or NULL for default name
1153 * 1206 *
1154 * Initializes a plane object of type @type. 1207 * Initializes a plane object of type @type.
1155 * 1208 *
@@ -1160,7 +1213,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1160 unsigned long possible_crtcs, 1213 unsigned long possible_crtcs,
1161 const struct drm_plane_funcs *funcs, 1214 const struct drm_plane_funcs *funcs,
1162 const uint32_t *formats, unsigned int format_count, 1215 const uint32_t *formats, unsigned int format_count,
1163 enum drm_plane_type type) 1216 enum drm_plane_type type,
1217 const char *name, ...)
1164{ 1218{
1165 struct drm_mode_config *config = &dev->mode_config; 1219 struct drm_mode_config *config = &dev->mode_config;
1166 int ret; 1220 int ret;
@@ -1182,6 +1236,22 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1182 return -ENOMEM; 1236 return -ENOMEM;
1183 } 1237 }
1184 1238
1239 if (name) {
1240 va_list ap;
1241
1242 va_start(ap, name);
1243 plane->name = kvasprintf(GFP_KERNEL, name, ap);
1244 va_end(ap);
1245 } else {
1246 plane->name = kasprintf(GFP_KERNEL, "plane-%d",
1247 drm_num_planes(dev));
1248 }
1249 if (!plane->name) {
1250 kfree(plane->format_types);
1251 drm_mode_object_put(dev, &plane->base);
1252 return -ENOMEM;
1253 }
1254
1185 memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); 1255 memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
1186 plane->format_count = format_count; 1256 plane->format_count = format_count;
1187 plane->possible_crtcs = possible_crtcs; 1257 plane->possible_crtcs = possible_crtcs;
@@ -1240,7 +1310,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
1240 1310
1241 type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 1311 type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
1242 return drm_universal_plane_init(dev, plane, possible_crtcs, funcs, 1312 return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
1243 formats, format_count, type); 1313 formats, format_count, type, NULL);
1244} 1314}
1245EXPORT_SYMBOL(drm_plane_init); 1315EXPORT_SYMBOL(drm_plane_init);
1246 1316
@@ -1272,6 +1342,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
1272 if (plane->state && plane->funcs->atomic_destroy_state) 1342 if (plane->state && plane->funcs->atomic_destroy_state)
1273 plane->funcs->atomic_destroy_state(plane, plane->state); 1343 plane->funcs->atomic_destroy_state(plane, plane->state);
1274 1344
1345 kfree(plane->name);
1346
1275 memset(plane, 0, sizeof(*plane)); 1347 memset(plane, 0, sizeof(*plane));
1276} 1348}
1277EXPORT_SYMBOL(drm_plane_cleanup); 1349EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1801,7 +1873,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1801 copied = 0; 1873 copied = 0;
1802 crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; 1874 crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
1803 drm_for_each_crtc(crtc, dev) { 1875 drm_for_each_crtc(crtc, dev) {
1804 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 1876 DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
1877 crtc->base.id, crtc->name);
1805 if (put_user(crtc->base.id, crtc_id + copied)) { 1878 if (put_user(crtc->base.id, crtc_id + copied)) {
1806 ret = -EFAULT; 1879 ret = -EFAULT;
1807 goto out; 1880 goto out;
@@ -2646,7 +2719,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2646 ret = -ENOENT; 2719 ret = -ENOENT;
2647 goto out; 2720 goto out;
2648 } 2721 }
2649 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 2722 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
2650 2723
2651 if (crtc_req->mode_valid) { 2724 if (crtc_req->mode_valid) {
2652 /* If we have a mode we need a framebuffer. */ 2725 /* If we have a mode we need a framebuffer. */
@@ -3235,7 +3308,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3235 3308
3236static struct drm_framebuffer * 3309static struct drm_framebuffer *
3237internal_framebuffer_create(struct drm_device *dev, 3310internal_framebuffer_create(struct drm_device *dev,
3238 struct drm_mode_fb_cmd2 *r, 3311 const struct drm_mode_fb_cmd2 *r,
3239 struct drm_file *file_priv) 3312 struct drm_file *file_priv)
3240{ 3313{
3241 struct drm_mode_config *config = &dev->mode_config; 3314 struct drm_mode_config *config = &dev->mode_config;
@@ -4785,9 +4858,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
4785 4858
4786 /* Do DPMS ourselves */ 4859 /* Do DPMS ourselves */
4787 if (property == connector->dev->mode_config.dpms_property) { 4860 if (property == connector->dev->mode_config.dpms_property) {
4788 ret = 0; 4861 ret = (*connector->funcs->dpms)(connector, (int)value);
4789 if (connector->funcs->dpms)
4790 ret = (*connector->funcs->dpms)(connector, (int)value);
4791 } else if (connector->funcs->set_property) 4862 } else if (connector->funcs->set_property)
4792 ret = connector->funcs->set_property(connector, property, value); 4863 ret = connector->funcs->set_property(connector, property, value);
4793 4864
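
The hunks above give drm_crtc_init_with_planes(), drm_encoder_init() and drm_universal_plane_init() an optional printf-style name argument. A minimal sketch of driver-side usage; my_crtc, my_encoder, the *_funcs tables and pipe are illustrative names, not part of this patch:

	/* Passing NULL instead of a format string falls back to the
	 * generated "crtc-%d", "plane-%d" or "<encoder type>-%d" defaults. */
	ret = drm_crtc_init_with_planes(dev, &my_crtc->base, primary, cursor,
					&my_crtc_funcs, "pipe-%d", pipe);
	if (ret)
		return ret;

	ret = drm_encoder_init(dev, &my_encoder->base, &my_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, "tmds-%d", pipe);
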
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ef534758a02c..a02a7f9a6a9d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -51,6 +51,11 @@
51 * the same callbacks which drivers can use to e.g. restore the modeset 51 * the same callbacks which drivers can use to e.g. restore the modeset
52 * configuration on resume with drm_helper_resume_force_mode(). 52 * configuration on resume with drm_helper_resume_force_mode().
53 * 53 *
54 * Note that this helper library doesn't track the current power state of CRTCs
55 * and encoders. It can call callbacks like ->dpms() even though the hardware is
56 * already in the desired state. This deficiency has been fixed in the atomic
57 * helpers.
58 *
54 * The driver callbacks are mostly compatible with the atomic modeset helpers, 59 * The driver callbacks are mostly compatible with the atomic modeset helpers,
55 * except for the handling of the primary plane: Atomic helpers require that the 60 * except for the handling of the primary plane: Atomic helpers require that the
56 * primary plane is implemented as a real standalone plane and not directly tied 61 * primary plane is implemented as a real standalone plane and not directly tied
@@ -62,6 +67,11 @@
62 * converting to the plane helpers). New drivers must not use these functions 67 * converting to the plane helpers). New drivers must not use these functions
63 * but need to implement the atomic interface instead, potentially using the 68 * but need to implement the atomic interface instead, potentially using the
64 * atomic helpers for that. 69 * atomic helpers for that.
70 *
71 * These legacy modeset helpers use the same function table structures as
72 * all other modesetting helpers. See the documentation for struct
73 * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
74 * &drm_connector_helper_funcs.
65 */ 75 */
66MODULE_AUTHOR("David Airlie, Jesse Barnes"); 76MODULE_AUTHOR("David Airlie, Jesse Barnes");
67MODULE_DESCRIPTION("DRM KMS helper"); 77MODULE_DESCRIPTION("DRM KMS helper");
@@ -206,8 +216,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
206 * @dev: DRM device 216 * @dev: DRM device
207 * 217 *
208 * This function walks through the entire mode setting configuration of @dev. It 218 * This function walks through the entire mode setting configuration of @dev. It
209 * will remove any crtc links of unused encoders and encoder links of 219 * will remove any CRTC links of unused encoders and encoder links of
210 * disconnected connectors. Then it will disable all unused encoders and crtcs 220 * disconnected connectors. Then it will disable all unused encoders and CRTCs
211 * either by calling their disable callback if available or by calling their 221 * either by calling their disable callback if available or by calling their
212 * dpms callback with DRM_MODE_DPMS_OFF. 222 * dpms callback with DRM_MODE_DPMS_OFF.
213 */ 223 */
@@ -329,7 +339,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
329 DRM_DEBUG_KMS("CRTC fixup failed\n"); 339 DRM_DEBUG_KMS("CRTC fixup failed\n");
330 goto done; 340 goto done;
331 } 341 }
332 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 342 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
333 343
334 crtc->hwmode = *adjusted_mode; 344 crtc->hwmode = *adjusted_mode;
335 345
@@ -445,11 +455,36 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
445 * drm_crtc_helper_set_config - set a new config from userspace 455 * drm_crtc_helper_set_config - set a new config from userspace
446 * @set: mode set configuration 456 * @set: mode set configuration
447 * 457 *
448 * Setup a new configuration, provided by the upper layers (either an ioctl call 458 * The drm_crtc_helper_set_config() helper function implements the set_config
449 * from userspace or internally e.g. from the fbdev support code) in @set, and 459 * callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers.
450 * enable it. This is the main helper functions for drivers that implement 460 *
451 * kernel mode setting with the crtc helper functions and the assorted 461 * It first tries to locate the best encoder for each connector by calling the
452 * ->prepare(), ->modeset() and ->commit() helper callbacks. 462 * connector ->best_encoder() (struct &drm_connector_helper_funcs) helper
463 * operation.
464 *
465 * After locating the appropriate encoders, the helper function will call the
466 * mode_fixup encoder and CRTC helper operations to adjust the requested mode,
467 * or reject it completely in which case an error will be returned to the
468 * application. If the new configuration after mode adjustment is identical to
469 * the current configuration the helper function will return without performing
470 * any other operation.
471 *
472 * If the adjusted mode is identical to the current mode but changes to the
473 * frame buffer need to be applied, the drm_crtc_helper_set_config() function
474 * will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper
475 * operation.
476 *
477 * If the adjusted mode differs from the current mode, or if the
478 * ->mode_set_base() helper operation is not provided, the helper function
479 * performs a full mode set sequence by calling the ->prepare(), ->mode_set()
480 * and ->commit() CRTC and encoder helper operations, in that order.
481 * Alternatively it can also use the dpms and disable helper operations. For
482 * details see struct &drm_crtc_helper_funcs and struct
483 * &drm_encoder_helper_funcs.
484 *
485 * This function is deprecated. New drivers must implement atomic modeset
486 * support, for which this function is unsuitable. Instead drivers should use
487 * drm_atomic_helper_set_config().
453 * 488 *
454 * Returns: 489 * Returns:
455 * Returns 0 on success, negative errno numbers on failure. 490 * Returns 0 on success, negative errno numbers on failure.
@@ -484,11 +519,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
484 set->fb = NULL; 519 set->fb = NULL;
485 520
486 if (set->fb) { 521 if (set->fb) {
487 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 522 DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
488 set->crtc->base.id, set->fb->base.id, 523 set->crtc->base.id, set->crtc->name,
489 (int)set->num_connectors, set->x, set->y); 524 set->fb->base.id,
525 (int)set->num_connectors, set->x, set->y);
490 } else { 526 } else {
491 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 527 DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
528 set->crtc->base.id, set->crtc->name);
492 drm_crtc_helper_disable(set->crtc); 529 drm_crtc_helper_disable(set->crtc);
493 return 0; 530 return 0;
494 } 531 }
@@ -628,12 +665,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
628 connector->encoder->crtc = new_crtc; 665 connector->encoder->crtc = new_crtc;
629 } 666 }
630 if (new_crtc) { 667 if (new_crtc) {
631 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 668 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
632 connector->base.id, connector->name, 669 connector->base.id, connector->name,
633 new_crtc->base.id); 670 new_crtc->base.id, new_crtc->name);
634 } else { 671 } else {
635 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 672 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
636 connector->base.id, connector->name); 673 connector->base.id, connector->name);
637 } 674 }
638 } 675 }
639 676
@@ -650,8 +687,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
650 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 687 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
651 set->x, set->y, 688 set->x, set->y,
652 save_set.fb)) { 689 save_set.fb)) {
653 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 690 DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
654 set->crtc->base.id); 691 set->crtc->base.id, set->crtc->name);
655 set->crtc->primary->fb = save_set.fb; 692 set->crtc->primary->fb = save_set.fb;
656 ret = -EINVAL; 693 ret = -EINVAL;
657 goto fail; 694 goto fail;
@@ -758,10 +795,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
758 * @connector: affected connector 795 * @connector: affected connector
759 * @mode: DPMS mode 796 * @mode: DPMS mode
760 * 797 *
761 * This is the main helper function provided by the crtc helper framework for 798 * The drm_helper_connector_dpms() helper function implements the ->dpms()
799 * callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers.
800 *
801 * This is the main helper function provided by the CRTC helper framework for
762 * implementing the DPMS connector attribute. It computes the new desired DPMS 802 * implementing the DPMS connector attribute. It computes the new desired DPMS
763 * state for all encoders and crtcs in the output mesh and calls the ->dpms() 803 * state for all encoders and CRTCs in the output mesh and calls the ->dpms()
764 * callback provided by the driver appropriately. 804 * callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct
805 * &drm_encoder_helper_funcs appropriately.
806 *
807 * This function is deprecated. New drivers must implement atomic modeset
808 * support, for which this function is unsuitable. Instead drivers should use
809 * drm_atomic_helper_connector_dpms().
765 * 810 *
766 * Returns: 811 * Returns:
767 * Always returns 0. 812 * Always returns 0.
@@ -818,7 +863,7 @@ EXPORT_SYMBOL(drm_helper_connector_dpms);
818 * metadata fields. 863 * metadata fields.
819 */ 864 */
820void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 865void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
821 struct drm_mode_fb_cmd2 *mode_cmd) 866 const struct drm_mode_fb_cmd2 *mode_cmd)
822{ 867{
823 int i; 868 int i;
824 869
@@ -855,6 +900,12 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
855 * due to slight differences in allocating shared resources when the 900 * due to slight differences in allocating shared resources when the
856 * configuration is restored in a different order than when userspace set it up) 901 * configuration is restored in a different order than when userspace set it up)
857 * need to use their own restore logic. 902 * need to use their own restore logic.
903 *
904 * This function is deprecated. New drivers should implement atomic mode-
905 * setting and use the atomic suspend/resume helpers.
906 *
907 * See also:
908 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
858 */ 909 */
859void drm_helper_resume_force_mode(struct drm_device *dev) 910void drm_helper_resume_force_mode(struct drm_device *dev)
860{ 911{
@@ -913,9 +964,9 @@ EXPORT_SYMBOL(drm_helper_resume_force_mode);
913 * @old_fb: previous framebuffer 964 * @old_fb: previous framebuffer
914 * 965 *
915 * This function implements a callback useable as the ->mode_set callback 966 * This function implements a callback useable as the ->mode_set callback
916 * required by the crtc helpers. Besides the atomic plane helper functions for 967 * required by the CRTC helpers. Besides the atomic plane helper functions for
917 * the primary plane the driver must also provide the ->mode_set_nofb callback 968 * the primary plane the driver must also provide the ->mode_set_nofb callback
918 * to set up the crtc. 969 * to set up the CRTC.
919 * 970 *
920 * This is a transitional helper useful for converting drivers to the atomic 971 * This is a transitional helper useful for converting drivers to the atomic
921 * interfaces. 972 * interfaces.
@@ -979,7 +1030,7 @@ EXPORT_SYMBOL(drm_helper_crtc_mode_set);
979 * @old_fb: previous framebuffer 1030 * @old_fb: previous framebuffer
980 * 1031 *
981 * This function implements a callback useable as the ->mode_set_base used 1032 * This function implements a callback useable as the ->mode_set_base
982 * required by the crtc helpers. The driver must provide the atomic plane helper 1033 * required by the CRTC helpers. The driver must provide the atomic plane helper
983 * functions for the primary plane. 1034 * functions for the primary plane.
984 * 1035 *
985 * This is a transitional helper useful for converting drivers to the atomic 1036 * This is a transitional helper useful for converting drivers to the atomic
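
The rewritten kernel-doc above spells out the ->prepare()/->mode_set()/->commit() sequence the legacy helper drives. For orientation, a sketch of how a non-atomic driver wires these helpers up; the foo_* callbacks are hypothetical:

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		.set_config = drm_crtc_helper_set_config,
		.destroy = drm_crtc_cleanup,
	};

	static const struct drm_crtc_helper_funcs foo_crtc_helpers = {
		.prepare = foo_crtc_prepare,	/* hypothetical */
		.mode_set = foo_crtc_mode_set,	/* hypothetical */
		.commit = foo_crtc_commit,	/* hypothetical */
	};

	drm_crtc_helper_add(crtc, &foo_crtc_helpers);

The connector side pairs with this through .dpms = drm_helper_connector_dpms in struct drm_connector_funcs, as documented above.
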
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9362609df38a..7dd6728dd092 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
160 goto out_unlock; 160 goto out_unlock;
161 } 161 }
162 162
163 if (!file_priv->allowed_master) {
164 ret = drm_new_set_master(dev, file_priv);
165 goto out_unlock;
166 }
167
163 file_priv->minor->master = drm_master_get(file_priv->master); 168 file_priv->minor->master = drm_master_get(file_priv->master);
164 file_priv->is_master = 1; 169 file_priv->is_master = 1;
165 if (dev->driver->master_set) { 170 if (dev->driver->master_set) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d5d2c03fd136..c214f1246cb4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2545,6 +2545,33 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
2545 return clock; 2545 return clock;
2546} 2546}
2547 2547
2548static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
2549 unsigned int clock_tolerance)
2550{
2551 u8 mode;
2552
2553 if (!to_match->clock)
2554 return 0;
2555
2556 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
2557 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
2558 unsigned int clock1, clock2;
2559
2560 /* Check both 60Hz and 59.94Hz */
2561 clock1 = cea_mode->clock;
2562 clock2 = cea_mode_alternate_clock(cea_mode);
2563
2564 if (abs(to_match->clock - clock1) > clock_tolerance &&
2565 abs(to_match->clock - clock2) > clock_tolerance)
2566 continue;
2567
2568 if (drm_mode_equal_no_clocks(to_match, cea_mode))
2569 return mode + 1;
2570 }
2571
2572 return 0;
2573}
2574
2548/** 2575/**
2549 * drm_match_cea_mode - look for a CEA mode matching given mode 2576 * drm_match_cea_mode - look for a CEA mode matching given mode
2550 * @to_match: display mode 2577 * @to_match: display mode
@@ -2609,6 +2636,33 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
2609 return cea_mode_alternate_clock(hdmi_mode); 2636 return cea_mode_alternate_clock(hdmi_mode);
2610} 2637}
2611 2638
2639static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
2640 unsigned int clock_tolerance)
2641{
2642 u8 mode;
2643
2644 if (!to_match->clock)
2645 return 0;
2646
2647 for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
2648 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
2649 unsigned int clock1, clock2;
2650
2651 /* Make sure to also match alternate clocks */
2652 clock1 = hdmi_mode->clock;
2653 clock2 = hdmi_mode_alternate_clock(hdmi_mode);
2654
2655 if (abs(to_match->clock - clock1) > clock_tolerance &&
2656 abs(to_match->clock - clock2) > clock_tolerance)
2657 continue;
2658
2659 if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
2660 return mode + 1;
2661 }
2662
2663 return 0;
2664}
2665
2612/* 2666/*
2613 * drm_match_hdmi_mode - look for a HDMI mode matching given mode 2667 * drm_match_hdmi_mode - look for a HDMI mode matching given mode
2614 * @to_match: display mode 2668 * @to_match: display mode
@@ -3119,14 +3173,18 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
3119 u8 mode_idx; 3173 u8 mode_idx;
3120 const char *type; 3174 const char *type;
3121 3175
3122 mode_idx = drm_match_cea_mode(mode) - 1; 3176 /*
3177 * allow 5kHz clock difference either way to account for
3178 * the 10kHz clock resolution limit of detailed timings.
3179 */
3180 mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1;
3123 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { 3181 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
3124 type = "CEA"; 3182 type = "CEA";
3125 cea_mode = &edid_cea_modes[mode_idx]; 3183 cea_mode = &edid_cea_modes[mode_idx];
3126 clock1 = cea_mode->clock; 3184 clock1 = cea_mode->clock;
3127 clock2 = cea_mode_alternate_clock(cea_mode); 3185 clock2 = cea_mode_alternate_clock(cea_mode);
3128 } else { 3186 } else {
3129 mode_idx = drm_match_hdmi_mode(mode) - 1; 3187 mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1;
3130 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { 3188 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
3131 type = "HDMI"; 3189 type = "HDMI";
3132 cea_mode = &edid_4k_modes[mode_idx]; 3190 cea_mode = &edid_4k_modes[mode_idx];
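
A worked example of why a 5kHz window is enough: 1080p60 has a nominal clock of 148500kHz, so the 59.94Hz variant is 148500 * 1000/1001 ~= 148351.6kHz (rounded to 148352kHz by cea_mode_alternate_clock()). A detailed timing descriptor stores the pixel clock in 10kHz units and can only encode 148350kHz; the resulting 2kHz deviation sits comfortably inside the tolerance.
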
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c19a62561183..b7d5b848d2f8 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -74,7 +74,7 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
74}; 74};
75 75
76static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, 76static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
77 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, 77 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
78 unsigned int num_planes) 78 unsigned int num_planes)
79{ 79{
80 struct drm_fb_cma *fb_cma; 80 struct drm_fb_cma *fb_cma;
@@ -107,7 +107,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
107 * checked before calling this function. 107 * checked before calling this function.
108 */ 108 */
109struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, 109struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
110 struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) 110 struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
111{ 111{
112 struct drm_fb_cma *fb_cma; 112 struct drm_fb_cma *fb_cma;
113 struct drm_gem_cma_object *objs[4]; 113 struct drm_gem_cma_object *objs[4];
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e673c13c7391..69cbab5e5c81 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
342 struct drm_plane *plane; 342 struct drm_plane *plane;
343 struct drm_atomic_state *state; 343 struct drm_atomic_state *state;
344 int i, ret; 344 int i, ret;
345 unsigned plane_mask;
345 346
346 state = drm_atomic_state_alloc(dev); 347 state = drm_atomic_state_alloc(dev);
347 if (!state) 348 if (!state)
@@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
349 350
350 state->acquire_ctx = dev->mode_config.acquire_ctx; 351 state->acquire_ctx = dev->mode_config.acquire_ctx;
351retry: 352retry:
353 plane_mask = 0;
352 drm_for_each_plane(plane, dev) { 354 drm_for_each_plane(plane, dev) {
353 struct drm_plane_state *plane_state; 355 struct drm_plane_state *plane_state;
354 356
355 plane->old_fb = plane->fb;
356
357 plane_state = drm_atomic_get_plane_state(state, plane); 357 plane_state = drm_atomic_get_plane_state(state, plane);
358 if (IS_ERR(plane_state)) { 358 if (IS_ERR(plane_state)) {
359 ret = PTR_ERR(plane_state); 359 ret = PTR_ERR(plane_state);
@@ -362,6 +362,9 @@ retry:
362 362
363 plane_state->rotation = BIT(DRM_ROTATE_0); 363 plane_state->rotation = BIT(DRM_ROTATE_0);
364 364
365 plane->old_fb = plane->fb;
366 plane_mask |= 1 << drm_plane_index(plane);
367
365 /* disable non-primary: */ 368 /* disable non-primary: */
366 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 369 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
367 continue; 370 continue;
@@ -382,19 +385,7 @@ retry:
382 ret = drm_atomic_commit(state); 385 ret = drm_atomic_commit(state);
383 386
384fail: 387fail:
385 drm_for_each_plane(plane, dev) { 388 drm_atomic_clean_old_fb(dev, plane_mask, ret);
386 if (ret == 0) {
387 struct drm_framebuffer *new_fb = plane->state->fb;
388 if (new_fb)
389 drm_framebuffer_reference(new_fb);
390 plane->fb = new_fb;
391 plane->crtc = plane->state->crtc;
392
393 if (plane->old_fb)
394 drm_framebuffer_unreference(plane->old_fb);
395 }
396 plane->old_fb = NULL;
397 }
398 389
399 if (ret == -EDEADLK) 390 if (ret == -EDEADLK)
400 goto backoff; 391 goto backoff;
@@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
1236 struct drm_fb_helper *fb_helper = info->par; 1227 struct drm_fb_helper *fb_helper = info->par;
1237 struct drm_device *dev = fb_helper->dev; 1228 struct drm_device *dev = fb_helper->dev;
1238 struct drm_atomic_state *state; 1229 struct drm_atomic_state *state;
1230 struct drm_plane *plane;
1239 int i, ret; 1231 int i, ret;
1232 unsigned plane_mask;
1240 1233
1241 state = drm_atomic_state_alloc(dev); 1234 state = drm_atomic_state_alloc(dev);
1242 if (!state) 1235 if (!state)
@@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
1244 1237
1245 state->acquire_ctx = dev->mode_config.acquire_ctx; 1238 state->acquire_ctx = dev->mode_config.acquire_ctx;
1246retry: 1239retry:
1240 plane_mask = 0;
1247 for(i = 0; i < fb_helper->crtc_count; i++) { 1241 for(i = 0; i < fb_helper->crtc_count; i++) {
1248 struct drm_mode_set *mode_set; 1242 struct drm_mode_set *mode_set;
1249 1243
1250 mode_set = &fb_helper->crtc_info[i].mode_set; 1244 mode_set = &fb_helper->crtc_info[i].mode_set;
1251 1245
1252 mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
1253
1254 mode_set->x = var->xoffset; 1246 mode_set->x = var->xoffset;
1255 mode_set->y = var->yoffset; 1247 mode_set->y = var->yoffset;
1256 1248
1257 ret = __drm_atomic_helper_set_config(mode_set, state); 1249 ret = __drm_atomic_helper_set_config(mode_set, state);
1258 if (ret != 0) 1250 if (ret != 0)
1259 goto fail; 1251 goto fail;
1252
1253 plane = mode_set->crtc->primary;
1254 plane_mask |= 1 << drm_plane_index(plane);
1255 plane->old_fb = plane->fb;
1260 } 1256 }
1261 1257
1262 ret = drm_atomic_commit(state); 1258 ret = drm_atomic_commit(state);
@@ -1268,26 +1264,7 @@ retry:
1268 1264
1269 1265
1270fail: 1266fail:
1271 for(i = 0; i < fb_helper->crtc_count; i++) { 1267 drm_atomic_clean_old_fb(dev, plane_mask, ret);
1272 struct drm_mode_set *mode_set;
1273 struct drm_plane *plane;
1274
1275 mode_set = &fb_helper->crtc_info[i].mode_set;
1276 plane = mode_set->crtc->primary;
1277
1278 if (ret == 0) {
1279 struct drm_framebuffer *new_fb = plane->state->fb;
1280
1281 if (new_fb)
1282 drm_framebuffer_reference(new_fb);
1283 plane->fb = new_fb;
1284 plane->crtc = plane->state->crtc;
1285
1286 if (plane->old_fb)
1287 drm_framebuffer_unreference(plane->old_fb);
1288 }
1289 plane->old_fb = NULL;
1290 }
1291 1268
1292 if (ret == -EDEADLK) 1269 if (ret == -EDEADLK)
1293 goto backoff; 1270 goto backoff;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..1ea8790e5090 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void)
126} 126}
127 127
128/** 128/**
129 * drm_new_set_master - Allocate a new master object and become master for the
130 * associated master realm.
131 *
132 * @dev: The associated device.
133 * @fpriv: File private identifying the client.
134 *
135 * This function must be called with dev::master_mutex held.
136 * Returns negative error code on failure. Zero on success.
137 */
138int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
139{
140 struct drm_master *old_master;
141 int ret;
142
143 lockdep_assert_held_once(&dev->master_mutex);
144
145 /* create a new master */
146 fpriv->minor->master = drm_master_create(fpriv->minor);
147 if (!fpriv->minor->master)
148 return -ENOMEM;
149
150 /* take another reference for the copy in the local file priv */
151 old_master = fpriv->master;
152 fpriv->master = drm_master_get(fpriv->minor->master);
153
154 if (dev->driver->master_create) {
155 ret = dev->driver->master_create(dev, fpriv->master);
156 if (ret)
157 goto out_err;
158 }
159 if (dev->driver->master_set) {
160 ret = dev->driver->master_set(dev, fpriv, true);
161 if (ret)
162 goto out_err;
163 }
164
165 fpriv->is_master = 1;
166 fpriv->allowed_master = 1;
167 fpriv->authenticated = 1;
168 if (old_master)
169 drm_master_put(&old_master);
170
171 return 0;
172
173out_err:
174 /* drop both references and restore old master on failure */
175 drm_master_put(&fpriv->minor->master);
176 drm_master_put(&fpriv->master);
177 fpriv->master = old_master;
178
179 return ret;
180}
181
182/**
129 * Called whenever a process opens /dev/drm. 183 * Called whenever a process opens /dev/drm.
130 * 184 *
131 * \param filp file pointer. 185 * \param filp file pointer.
@@ -172,6 +226,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
172 init_waitqueue_head(&priv->event_wait); 226 init_waitqueue_head(&priv->event_wait);
173 priv->event_space = 4096; /* set aside 4k for event buffer */ 227 priv->event_space = 4096; /* set aside 4k for event buffer */
174 228
229 mutex_init(&priv->event_read_lock);
230
175 if (drm_core_check_feature(dev, DRIVER_GEM)) 231 if (drm_core_check_feature(dev, DRIVER_GEM))
176 drm_gem_open(dev, priv); 232 drm_gem_open(dev, priv);
177 233
@@ -189,35 +245,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
189 mutex_lock(&dev->master_mutex); 245 mutex_lock(&dev->master_mutex);
190 if (drm_is_primary_client(priv) && !priv->minor->master) { 246 if (drm_is_primary_client(priv) && !priv->minor->master) {
191 /* create a new master */ 247 /* create a new master */
192 priv->minor->master = drm_master_create(priv->minor); 248 ret = drm_new_set_master(dev, priv);
193 if (!priv->minor->master) { 249 if (ret)
194 ret = -ENOMEM;
195 goto out_close; 250 goto out_close;
196 }
197
198 priv->is_master = 1;
199 /* take another reference for the copy in the local file priv */
200 priv->master = drm_master_get(priv->minor->master);
201 priv->authenticated = 1;
202
203 if (dev->driver->master_create) {
204 ret = dev->driver->master_create(dev, priv->master);
205 if (ret) {
206 /* drop both references if this fails */
207 drm_master_put(&priv->minor->master);
208 drm_master_put(&priv->master);
209 goto out_close;
210 }
211 }
212 if (dev->driver->master_set) {
213 ret = dev->driver->master_set(dev, priv, true);
214 if (ret) {
215 /* drop both references if this fails */
216 drm_master_put(&priv->minor->master);
217 drm_master_put(&priv->master);
218 goto out_close;
219 }
220 }
221 } else if (drm_is_primary_client(priv)) { 251 } else if (drm_is_primary_client(priv)) {
222 /* get a reference to the master */ 252 /* get a reference to the master */
223 priv->master = drm_master_get(priv->minor->master); 253 priv->master = drm_master_get(priv->minor->master);
@@ -483,14 +513,28 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
483{ 513{
484 struct drm_file *file_priv = filp->private_data; 514 struct drm_file *file_priv = filp->private_data;
485 struct drm_device *dev = file_priv->minor->dev; 515 struct drm_device *dev = file_priv->minor->dev;
486 ssize_t ret = 0; 516 ssize_t ret;
487 517
488 if (!access_ok(VERIFY_WRITE, buffer, count)) 518 if (!access_ok(VERIFY_WRITE, buffer, count))
489 return -EFAULT; 519 return -EFAULT;
490 520
491 spin_lock_irq(&dev->event_lock); 521 ret = mutex_lock_interruptible(&file_priv->event_read_lock);
522 if (ret)
523 return ret;
524
492 for (;;) { 525 for (;;) {
493 if (list_empty(&file_priv->event_list)) { 526 struct drm_pending_event *e = NULL;
527
528 spin_lock_irq(&dev->event_lock);
529 if (!list_empty(&file_priv->event_list)) {
530 e = list_first_entry(&file_priv->event_list,
531 struct drm_pending_event, link);
532 file_priv->event_space += e->event->length;
533 list_del(&e->link);
534 }
535 spin_unlock_irq(&dev->event_lock);
536
537 if (e == NULL) {
494 if (ret) 538 if (ret)
495 break; 539 break;
496 540
@@ -499,36 +543,36 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
499 break; 543 break;
500 } 544 }
501 545
502 spin_unlock_irq(&dev->event_lock); 546 mutex_unlock(&file_priv->event_read_lock);
503 ret = wait_event_interruptible(file_priv->event_wait, 547 ret = wait_event_interruptible(file_priv->event_wait,
504 !list_empty(&file_priv->event_list)); 548 !list_empty(&file_priv->event_list));
505 spin_lock_irq(&dev->event_lock); 549 if (ret >= 0)
506 if (ret < 0) 550 ret = mutex_lock_interruptible(&file_priv->event_read_lock);
507 break; 551 if (ret)
508 552 return ret;
509 ret = 0;
510 } else { 553 } else {
511 struct drm_pending_event *e; 554 unsigned length = e->event->length;
512 555
513 e = list_first_entry(&file_priv->event_list, 556 if (length > count - ret) {
514 struct drm_pending_event, link); 557put_back_event:
515 if (e->event->length + ret > count) 558 spin_lock_irq(&dev->event_lock);
559 file_priv->event_space -= length;
560 list_add(&e->link, &file_priv->event_list);
561 spin_unlock_irq(&dev->event_lock);
516 break; 562 break;
563 }
517 564
518 if (__copy_to_user_inatomic(buffer + ret, 565 if (copy_to_user(buffer + ret, e->event, length)) {
519 e->event, e->event->length)) {
520 if (ret == 0) 566 if (ret == 0)
521 ret = -EFAULT; 567 ret = -EFAULT;
522 break; 568 goto put_back_event;
523 } 569 }
524 570
525 file_priv->event_space += e->event->length; 571 ret += length;
526 ret += e->event->length;
527 list_del(&e->link);
528 e->destroy(e); 572 e->destroy(e);
529 } 573 }
530 } 574 }
531 spin_unlock_irq(&dev->event_lock); 575 mutex_unlock(&file_priv->event_read_lock);
532 576
533 return ret; 577 return ret;
534} 578}
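
Since drm_read() now returns whole events only (partially-copied events are put back on the list), userspace can walk the returned buffer by event length. A sketch, assuming the libdrm copy of the UAPI header is on the include path:

	#include <string.h>
	#include <unistd.h>
	#include <drm.h>	/* struct drm_event, DRM_EVENT_* */

	static void drain_events(int drm_fd)
	{
		char buf[1024];
		ssize_t len = read(drm_fd, buf, sizeof(buf));
		ssize_t off = 0;

		if (len <= 0)
			return;

		/* Whole events only, so stepping by ev.length (which
		 * includes the header) never overruns the buffer. */
		while (off < len) {
			struct drm_event ev;

			memcpy(&ev, buf + off, sizeof(ev));
			if (ev.type == DRM_EVENT_FLIP_COMPLETE) {
				/* struct drm_event_vblank lives at buf + off */
			}
			off += ev.length;
		}
	}
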
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c7de454e8e88..2e10bba4468b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -244,8 +244,9 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
244 * @filp: drm file-private structure to use for the handle look up 244 * @filp: drm file-private structure to use for the handle look up
245 * @handle: userspace handle to delete 245 * @handle: userspace handle to delete
246 * 246 *
247 * Removes the GEM handle from the @filp lookup table and if this is the last 247 * Removes the GEM handle from the @filp lookup table which has been added with
248 * handle also cleans up linked resources like GEM names. 248 * drm_gem_handle_create(). If this is the last handle also cleans up linked
249 * resources like GEM names.
249 */ 250 */
250int 251int
251drm_gem_handle_delete(struct drm_file *filp, u32 handle) 252drm_gem_handle_delete(struct drm_file *filp, u32 handle)
@@ -314,6 +315,10 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
314 * This expects the dev->object_name_lock to be held already and will drop it 315 * This expects the dev->object_name_lock to be held already and will drop it
315 * before returning. Used to avoid races in establishing new handles when 316 * before returning. Used to avoid races in establishing new handles when
316 * importing an object from either a flink name or a dma-buf. 317 * importing an object from either a flink name or a dma-buf.
318 *
319 * Handles must be released again through drm_gem_handle_delete(). This is done
320 * when userspace closes @file_priv for all attached handles, or through the
321 * GEM_CLOSE ioctl for individual handles.
317 */ 322 */
318int 323int
319drm_gem_handle_create_tail(struct drm_file *file_priv, 324drm_gem_handle_create_tail(struct drm_file *file_priv,
@@ -541,7 +546,17 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
541} 546}
542EXPORT_SYMBOL(drm_gem_put_pages); 547EXPORT_SYMBOL(drm_gem_put_pages);
543 548
544/** Returns a reference to the object named by the handle. */ 549/**
550 * drm_gem_object_lookup - look up a GEM object from its handle
551 * @dev: DRM device
552 * @filp: DRM file private data
553 * @handle: userspace handle
554 *
555 * Returns:
556 *
557 * A reference to the object named by the handle if such exists on @filp, NULL
558 * otherwise.
559 */
545struct drm_gem_object * 560struct drm_gem_object *
546drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, 561drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
547 u32 handle) 562 u32 handle)
@@ -774,6 +789,13 @@ drm_gem_object_free(struct kref *kref)
774} 789}
775EXPORT_SYMBOL(drm_gem_object_free); 790EXPORT_SYMBOL(drm_gem_object_free);
776 791
792/**
793 * drm_gem_vm_open - vma->ops->open implementation for GEM
794 * @vma: VM area structure
795 *
796 * This function implements the #vm_operations_struct open() callback for GEM
797 * drivers. This must be used together with drm_gem_vm_close().
798 */
777void drm_gem_vm_open(struct vm_area_struct *vma) 799void drm_gem_vm_open(struct vm_area_struct *vma)
778{ 800{
779 struct drm_gem_object *obj = vma->vm_private_data; 801 struct drm_gem_object *obj = vma->vm_private_data;
@@ -782,6 +804,13 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
782} 804}
783EXPORT_SYMBOL(drm_gem_vm_open); 805EXPORT_SYMBOL(drm_gem_vm_open);
784 806
807/**
808 * drm_gem_vm_close - vma->ops->close implementation for GEM
809 * @vma: VM area structure
810 *
811 * This function implements the #vm_operations_struct close() callback for GEM
812 * drivers. This must be used together with drm_gem_vm_open().
813 */
785void drm_gem_vm_close(struct vm_area_struct *vma) 814void drm_gem_vm_close(struct vm_area_struct *vma)
786{ 815{
787 struct drm_gem_object *obj = vma->vm_private_data; 816 struct drm_gem_object *obj = vma->vm_private_data;
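
The two new kernel-doc blocks document the open()/close() pairing. A typical driver wires the helpers into its vm_operations_struct next to its own fault handler and points struct drm_driver's gem_vm_ops field at it; foo_gem_fault is hypothetical:

	static const struct vm_operations_struct foo_gem_vm_ops = {
		.fault = foo_gem_fault,		/* driver-specific */
		.open = drm_gem_vm_open,
		.close = drm_gem_vm_close,
	};
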
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e109b49cd25d..0f7b00ba57da 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -59,11 +59,13 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size)
59 struct drm_gem_object *gem_obj; 59 struct drm_gem_object *gem_obj;
60 int ret; 60 int ret;
61 61
62 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); 62 if (drm->driver->gem_create_object)
63 if (!cma_obj) 63 gem_obj = drm->driver->gem_create_object(drm, size);
64 else
65 gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
66 if (!gem_obj)
64 return ERR_PTR(-ENOMEM); 67 return ERR_PTR(-ENOMEM);
65 68 cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
66 gem_obj = &cma_obj->base;
67 69
68 ret = drm_gem_object_init(drm, gem_obj, size); 70 ret = drm_gem_object_init(drm, gem_obj, size);
69 if (ret) 71 if (ret)
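
With the new gem_create_object hook a driver can subclass the CMA GEM object and let the helper do the rest of the setup. A sketch; struct foo_bo and its extra field are hypothetical:

	struct foo_bo {
		struct drm_gem_cma_object base;
		u32 tiling;		/* driver-private state */
	};

	static struct drm_gem_object *
	foo_gem_create_object(struct drm_device *dev, size_t size)
	{
		struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

		/* __drm_gem_cma_create() maps NULL to -ENOMEM */
		return bo ? &bo->base.base : NULL;
	}

The hook is then set as .gem_create_object in struct drm_driver.
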
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2151ea551d3b..607f493ae801 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
980 struct drm_pending_vblank_event *e, 980 struct drm_pending_vblank_event *e,
981 unsigned long seq, struct timeval *now) 981 unsigned long seq, struct timeval *now)
982{ 982{
983 WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); 983 assert_spin_locked(&dev->event_lock);
984
984 e->event.sequence = seq; 985 e->event.sequence = seq;
985 e->event.tv_sec = now->tv_sec; 986 e->event.tv_sec = now->tv_sec;
986 e->event.tv_usec = now->tv_usec; 987 e->event.tv_usec = now->tv_usec;
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev,
993} 994}
994 995
995/** 996/**
997 * drm_arm_vblank_event - arm vblank event after pageflip
998 * @dev: DRM device
999 * @pipe: CRTC index
1000 * @e: the event to prepare to send
1001 *
1002 * A lot of drivers need to generate vblank events for the very next vblank
1003 * interrupt. For example when the page flip interrupt happens when the page
1004 * flip gets armed, but not when it actually executes within the next vblank
1005 * period. This helper function implements exactly the required vblank arming
1006 * behaviour.
1007 *
1008 * Caller must hold event lock. Caller must also hold a vblank reference for
1009 * the event @e, which will be dropped when the next vblank arrives.
1010 *
1011 * This is the legacy version of drm_crtc_arm_vblank_event().
1012 */
1013void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
1014 struct drm_pending_vblank_event *e)
1015{
1016 assert_spin_locked(&dev->event_lock);
1017
1018 e->pipe = pipe;
1019 e->event.sequence = drm_vblank_count(dev, pipe);
1020 list_add_tail(&e->base.link, &dev->vblank_event_list);
1021}
1022EXPORT_SYMBOL(drm_arm_vblank_event);
1023
1024/**
1025 * drm_crtc_arm_vblank_event - arm vblank event after pageflip
1026 * @crtc: the source CRTC of the vblank event
1027 * @e: the event to send
1028 *
1029 * A lot of drivers need to generate vblank events for the very next vblank
1030 * interrupt. For example when the page flip interrupt happens when the page
1031 * flip gets armed, but not when it actually executes within the next vblank
1032 * period. This helper function implements exactly the required vblank arming
1033 * behaviour.
1034 *
1035 * Caller must hold event lock. Caller must also hold a vblank reference for
1036 * the event @e, which will be dropped when the next vblank arrives.
1037 *
1038 * This is the native KMS version of drm_arm_vblank_event().
1039 */
1040void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1041 struct drm_pending_vblank_event *e)
1042{
1043 drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
1044}
1045EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
1046
1047/**
996 * drm_send_vblank_event - helper to send vblank event after pageflip 1048 * drm_send_vblank_event - helper to send vblank event after pageflip
997 * @dev: DRM device 1049 * @dev: DRM device
998 * @pipe: CRTC index 1050 * @pipe: CRTC index
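
A sketch of the arming pattern the new helpers document, as it might look in a hypothetical driver's flip-armed interrupt path. The vblank reference is assumed to have been taken with drm_crtc_vblank_get() when the flip was queued:

	static void foo_flip_armed_irq(struct drm_crtc *crtc,
				       struct drm_pending_vblank_event *event)
	{
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		drm_crtc_arm_vblank_event(crtc, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
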
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index cd74a0953f42..5a8a78d5e960 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -708,7 +708,8 @@ void drm_mode_set_name(struct drm_display_mode *mode)
708} 708}
709EXPORT_SYMBOL(drm_mode_set_name); 709EXPORT_SYMBOL(drm_mode_set_name);
710 710
711/** drm_mode_hsync - get the hsync of a mode 711/**
712 * drm_mode_hsync - get the hsync of a mode
712 * @mode: mode 713 * @mode: mode
713 * 714 *
714 * Returns: 715 * Returns:
@@ -917,13 +918,30 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
917 } else if (mode1->clock != mode2->clock) 918 } else if (mode1->clock != mode2->clock)
918 return false; 919 return false;
919 920
921 return drm_mode_equal_no_clocks(mode1, mode2);
922}
923EXPORT_SYMBOL(drm_mode_equal);
924
925/**
926 * drm_mode_equal_no_clocks - test modes for equality
927 * @mode1: first mode
928 * @mode2: second mode
929 *
930 * Check to see if @mode1 and @mode2 are equivalent, but
931 * don't check the pixel clocks.
932 *
933 * Returns:
934 * True if the modes are equal, false otherwise.
935 */
936bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
937{
920 if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != 938 if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
921 (mode2->flags & DRM_MODE_FLAG_3D_MASK)) 939 (mode2->flags & DRM_MODE_FLAG_3D_MASK))
922 return false; 940 return false;
923 941
924 return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); 942 return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
925} 943}
926EXPORT_SYMBOL(drm_mode_equal); 944EXPORT_SYMBOL(drm_mode_equal_no_clocks);
927 945
928/** 946/**
929 * drm_mode_equal_no_clocks_no_stereo - test modes for equality 947 * drm_mode_equal_no_clocks_no_stereo - test modes for equality
@@ -1056,7 +1074,7 @@ static const char * const drm_mode_status_names[] = {
1056 MODE_STATUS(ONE_SIZE), 1074 MODE_STATUS(ONE_SIZE),
1057 MODE_STATUS(NO_REDUCED), 1075 MODE_STATUS(NO_REDUCED),
1058 MODE_STATUS(NO_STEREO), 1076 MODE_STATUS(NO_STEREO),
1059 MODE_STATUS(UNVERIFIED), 1077 MODE_STATUS(STALE),
1060 MODE_STATUS(BAD), 1078 MODE_STATUS(BAD),
1061 MODE_STATUS(ERROR), 1079 MODE_STATUS(ERROR),
1062}; 1080};
@@ -1154,7 +1172,6 @@ EXPORT_SYMBOL(drm_mode_sort);
1154/** 1172/**
1155 * drm_mode_connector_list_update - update the mode list for the connector 1173 * drm_mode_connector_list_update - update the mode list for the connector
1156 * @connector: the connector to update 1174 * @connector: the connector to update
1157 * @merge_type_bits: whether to merge or overwrite type bits
1158 * 1175 *
1159 * This moves the modes from the @connector probed_modes list 1176 * This moves the modes from the @connector probed_modes list
1160 * to the actual mode list. It compares the probed mode against the current 1177 * to the actual mode list. It compares the probed mode against the current
@@ -1163,33 +1180,48 @@ EXPORT_SYMBOL(drm_mode_sort);
1163 * This is just a helper function; it doesn't validate any modes itself and also 1180 * This is just a helper function; it doesn't validate any modes itself and also
1164 * doesn't prune any invalid modes. Callers need to do that themselves. 1181 * doesn't prune any invalid modes. Callers need to do that themselves.
1165 */ 1182 */
1166void drm_mode_connector_list_update(struct drm_connector *connector, 1183void drm_mode_connector_list_update(struct drm_connector *connector)
1167 bool merge_type_bits)
1168{ 1184{
1169 struct drm_display_mode *mode;
1170 struct drm_display_mode *pmode, *pt; 1185 struct drm_display_mode *pmode, *pt;
1171 int found_it;
1172 1186
1173 WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex)); 1187 WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
1174 1188
1175 list_for_each_entry_safe(pmode, pt, &connector->probed_modes, 1189 list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
1176 head) { 1190 struct drm_display_mode *mode;
1177 found_it = 0; 1191 bool found_it = false;
1192
1178 /* go through current modes checking for the new probed mode */ 1193 /* go through current modes checking for the new probed mode */
1179 list_for_each_entry(mode, &connector->modes, head) { 1194 list_for_each_entry(mode, &connector->modes, head) {
1180 if (drm_mode_equal(pmode, mode)) { 1195 if (!drm_mode_equal(pmode, mode))
1181 found_it = 1; 1196 continue;
1182 /* if equal delete the probed mode */ 1197
1183 mode->status = pmode->status; 1198 found_it = true;
1184 /* Merge type bits together */ 1199
1185 if (merge_type_bits) 1200 /*
1186 mode->type |= pmode->type; 1201 * If the old matching mode is stale (ie. left over
1187 else 1202 * from a previous probe) just replace it outright.
1188 mode->type = pmode->type; 1203 * Otherwise just merge the type bits between all
1189 list_del(&pmode->head); 1204 * equal probed modes.
1190 drm_mode_destroy(connector->dev, pmode); 1205 *
1191 break; 1206 * If two probed modes are considered equal, pick the
1207 * actual timings from the one that's marked as
1208 * preferred (in case the match isn't 100%). If
1209 * multiple or zero preferred modes are present, favor
1210 * the mode added to the probed_modes list first.
1211 */
1212 if (mode->status == MODE_STALE) {
1213 drm_mode_copy(mode, pmode);
1214 } else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 &&
1215 (pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) {
1216 pmode->type |= mode->type;
1217 drm_mode_copy(mode, pmode);
1218 } else {
1219 mode->type |= pmode->type;
1192 } 1220 }
1221
1222 list_del(&pmode->head);
1223 drm_mode_destroy(connector->dev, pmode);
1224 break;
1193 } 1225 }
1194 1226
1195 if (!found_it) { 1227 if (!found_it) {
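
The newly exported drm_mode_equal_no_clocks() slots between the existing comparison helpers. A purely illustrative classifier showing the ladder:

	static const char *foo_mode_match(const struct drm_display_mode *a,
					  const struct drm_display_mode *b)
	{
		if (drm_mode_equal(a, b))
			return "identical";
		if (drm_mode_equal_no_clocks(a, b))
			return "same timings, clocks differ";
		if (drm_mode_equal_no_clocks_no_stereo(a, b))
			return "same timings, clocks and 3D flags differ";
		return "different";
	}
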
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 6675b1428410..c2f5971146ba 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -57,11 +57,18 @@
57 57
58/** 58/**
59 * drm_modeset_lock_all - take all modeset locks 59 * drm_modeset_lock_all - take all modeset locks
60 * @dev: drm device 60 * @dev: DRM device
61 * 61 *
62 * This function takes all modeset locks, suitable where a more fine-grained 62 * This function takes all modeset locks, suitable where a more fine-grained
63 * scheme isn't (yet) implemented. Locks must be dropped with 63 * scheme isn't (yet) implemented. Locks must be dropped by calling the
64 * drm_modeset_unlock_all. 64 * drm_modeset_unlock_all() function.
65 *
66 * This function is deprecated. It allocates a lock acquisition context and
67 * stores it in the DRM device's ->mode_config. This facilitates conversion of
68 * existing code because it removes the need to manually deal with the
69 * acquisition context, but it is also brittle because the context is global
70 * and care must be taken not to nest calls. New code should use the
71 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
65 */ 72 */
66void drm_modeset_lock_all(struct drm_device *dev) 73void drm_modeset_lock_all(struct drm_device *dev)
67{ 74{
@@ -78,39 +85,43 @@ void drm_modeset_lock_all(struct drm_device *dev)
78 drm_modeset_acquire_init(ctx, 0); 85 drm_modeset_acquire_init(ctx, 0);
79 86
80retry: 87retry:
81 ret = drm_modeset_lock(&config->connection_mutex, ctx); 88 ret = drm_modeset_lock_all_ctx(dev, ctx);
82 if (ret) 89 if (ret < 0) {
83 goto fail; 90 if (ret == -EDEADLK) {
84 ret = drm_modeset_lock_all_crtcs(dev, ctx); 91 drm_modeset_backoff(ctx);
85 if (ret) 92 goto retry;
86 goto fail; 93 }
94
95 drm_modeset_acquire_fini(ctx);
96 kfree(ctx);
97 return;
98 }
87 99
88 WARN_ON(config->acquire_ctx); 100 WARN_ON(config->acquire_ctx);
89 101
90 /* now we hold the locks, so now that it is safe, stash the 102 /*
91 * ctx for drm_modeset_unlock_all(): 103 * We hold the locks now, so it is safe to stash the acquisition
104 * context for drm_modeset_unlock_all().
92 */ 105 */
93 config->acquire_ctx = ctx; 106 config->acquire_ctx = ctx;
94 107
95 drm_warn_on_modeset_not_all_locked(dev); 108 drm_warn_on_modeset_not_all_locked(dev);
96
97 return;
98
99fail:
100 if (ret == -EDEADLK) {
101 drm_modeset_backoff(ctx);
102 goto retry;
103 }
104
105 kfree(ctx);
106} 109}
107EXPORT_SYMBOL(drm_modeset_lock_all); 110EXPORT_SYMBOL(drm_modeset_lock_all);
108 111
109/** 112/**
110 * drm_modeset_unlock_all - drop all modeset locks 113 * drm_modeset_unlock_all - drop all modeset locks
111 * @dev: device 114 * @dev: DRM device
112 * 115 *
113 * This function drop all modeset locks taken by drm_modeset_lock_all. 116 * This function drops all modeset locks taken by a previous call to the
117 * drm_modeset_lock_all() function.
118 *
119 * This function is deprecated. It uses the lock acquisition context stored
120 * in the DRM device's ->mode_config. This facilitates conversion of existing
121 * code because it removes the need to manually deal with the acquisition
122 * context, but it is also brittle because the context is global and care must
123 * be taken not to nest calls. New code should pass the acquisition context
124 * directly to the drm_modeset_drop_locks() function.
114 */ 125 */
115void drm_modeset_unlock_all(struct drm_device *dev) 126void drm_modeset_unlock_all(struct drm_device *dev)
116{ 127{
@@ -431,14 +442,34 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
431} 442}
432EXPORT_SYMBOL(drm_modeset_unlock); 443EXPORT_SYMBOL(drm_modeset_unlock);
433 444
434/* In some legacy codepaths it's convenient to just grab all the crtc and plane 445/**
435 * related locks. */ 446 * drm_modeset_lock_all_ctx - take all modeset locks
436int drm_modeset_lock_all_crtcs(struct drm_device *dev, 447 * @dev: DRM device
437 struct drm_modeset_acquire_ctx *ctx) 448 * @ctx: lock acquisition context
449 *
450 * This function takes all modeset locks, suitable where a more fine-grained
451 * scheme isn't (yet) implemented.
452 *
453 * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
454 * since that lock isn't required for modeset state changes. Callers which
455 * need to grab that lock too need to do so outside of the acquire context
456 * @ctx.
457 *
458 * Locks acquired with this function should be released by calling the
459 * drm_modeset_drop_locks() function on @ctx.
460 *
461 * Returns: 0 on success or a negative error-code on failure.
462 */
463int drm_modeset_lock_all_ctx(struct drm_device *dev,
464 struct drm_modeset_acquire_ctx *ctx)
438{ 465{
439 struct drm_crtc *crtc; 466 struct drm_crtc *crtc;
440 struct drm_plane *plane; 467 struct drm_plane *plane;
441 int ret = 0; 468 int ret;
469
470 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
471 if (ret)
472 return ret;
442 473
443 drm_for_each_crtc(crtc, dev) { 474 drm_for_each_crtc(crtc, dev) {
444 ret = drm_modeset_lock(&crtc->mutex, ctx); 475 ret = drm_modeset_lock(&crtc->mutex, ctx);
@@ -454,4 +485,4 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
454 485
455 return 0; 486 return 0;
456} 487}
457EXPORT_SYMBOL(drm_modeset_lock_all_crtcs); 488EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
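
The explicit-context pattern drm_modeset_lock_all_ctx() enables, mirroring what drm_modeset_lock_all() itself now does internally (sketch; dev is assumed to be in scope):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... modeset state may be inspected or changed here ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
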
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index d384ebcf0aaf..369d2898ff9e 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -57,6 +57,10 @@
57 * by the atomic helpers. 57 * by the atomic helpers.
58 * 58 *
59 * Again drivers are strongly urged to switch to the new interfaces. 59 * Again drivers are strongly urged to switch to the new interfaces.
60 *
61 * The plane helpers share the function table structures with other helpers,
62 * specifically also the atomic helpers. See struct &drm_plane_helper_funcs for
63 * the details.
60 */ 64 */
61 65
62/* 66/*
@@ -164,6 +168,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
164 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); 168 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
165 if (hscale < 0 || vscale < 0) { 169 if (hscale < 0 || vscale < 0) {
166 DRM_DEBUG_KMS("Invalid scaling of plane\n"); 170 DRM_DEBUG_KMS("Invalid scaling of plane\n");
171 drm_rect_debug_print("src: ", src, true);
172 drm_rect_debug_print("dst: ", dest, false);
167 return -ERANGE; 173 return -ERANGE;
168 } 174 }
169 175
@@ -180,6 +186,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
180 186
181 if (!can_position && !drm_rect_equals(dest, clip)) { 187 if (!can_position && !drm_rect_equals(dest, clip)) {
182 DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); 188 DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
189 drm_rect_debug_print("dst: ", dest, false);
190 drm_rect_debug_print("clip: ", clip, false);
183 return -EINVAL; 191 return -EINVAL;
184 } 192 }
185 193
@@ -367,7 +375,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
367 &drm_primary_helper_funcs, 375 &drm_primary_helper_funcs,
368 safe_modeset_formats, 376 safe_modeset_formats,
369 ARRAY_SIZE(safe_modeset_formats), 377 ARRAY_SIZE(safe_modeset_formats),
370 DRM_PLANE_TYPE_PRIMARY); 378 DRM_PLANE_TYPE_PRIMARY, NULL);
371 if (ret) { 379 if (ret) {
372 kfree(primary); 380 kfree(primary);
373 primary = NULL; 381 primary = NULL;
@@ -394,7 +402,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
394 struct drm_plane *primary; 402 struct drm_plane *primary;
395 403
396 primary = create_primary_plane(dev); 404 primary = create_primary_plane(dev);
397 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); 405 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
406 NULL);
398} 407}
399EXPORT_SYMBOL(drm_crtc_init); 408EXPORT_SYMBOL(drm_crtc_init);
400 409
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a18164f2f6d2..e714b5a7955f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -53,6 +53,9 @@
53 * This helper library can be used independently of the modeset helper library. 53 * This helper library can be used independently of the modeset helper library.
54 * Drivers can also overwrite different parts e.g. use their own hotplug 54 * Drivers can also overwrite different parts e.g. use their own hotplug
55 * handling code to avoid probing unrelated outputs. 55 * handling code to avoid probing unrelated outputs.
56 *
57 * The probe helpers share the function table structures with other display
58 * helper libraries. See struct &drm_connector_helper_funcs for the details.
56 */ 59 */
57 60
58static bool drm_kms_helper_poll = true; 61static bool drm_kms_helper_poll = true;
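
A minimal sketch of the shared struct drm_connector_helper_funcs table the probe helpers consume (foo_get_modes() and foo_mode_valid() are hypothetical driver callbacks):

    static const struct drm_connector_helper_funcs foo_conn_helper_funcs = {
            .get_modes  = foo_get_modes,   /* fills the probed_modes list */
            .mode_valid = foo_mode_valid,  /* optional per-mode validation */
    };

    drm_connector_helper_add(connector, &foo_conn_helper_funcs);
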
@@ -126,9 +129,64 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
126} 129}
127EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); 130EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
128 131
129 132/**
130static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, 133 * drm_helper_probe_single_connector_modes - get complete set of display modes
131 uint32_t maxX, uint32_t maxY, bool merge_type_bits) 134 * @connector: connector to probe
135 * @maxX: max width for modes
136 * @maxY: max height for modes
137 *
138 * Based on the helper callbacks implemented by @connector in struct
139 * &drm_connector_helper_funcs try to detect all valid modes. Modes will first
140 * be added to the connector's probed_modes list, then culled (based on validity
141 * and the @maxX, @maxY parameters) and put into the normal modes list.
142 *
143 * Intended to be used as a generic implementation of the ->fill_modes()
144 * @connector vfunc for drivers that use the CRTC helpers for output mode
145 * filtering and detection.
146 *
147 * The basic procedure is as follows
148 *
149 * 1. All modes currently on the connector's modes list are marked as stale
150 *
151 * 2. New modes are added to the connector's probed_modes list with
152 * drm_mode_probed_add(). New modes start their life with status as OK.
153 * Modes are added from a single source using the following priority order.
154 *
155 * - debugfs 'override_edid' (used for testing only)
156 * - firmware EDID (drm_load_edid_firmware())
157 * - connector helper ->get_modes() vfunc
158 * - if the connector status is connector_status_connected, standard
159 * VESA DMT modes up to 1024x768 are automatically added
160 * (drm_add_modes_noedid())
161 *
162 * Finally modes specified via the kernel command line (video=...) are
163 * added in addition to what the earlier probes produced
164 * (drm_helper_probe_add_cmdline_mode()). These modes are generated
165 * using the VESA GTF/CVT formulas.
166 *
167 * 3. Modes are moved from the probed_modes list to the modes list. Potential
168 * duplicates are merged together (see drm_mode_connector_list_update()).
169 * After this step the probed_modes list will be empty again.
170 *
171 * 4. Any non-stale mode on the modes list then undergoes validation
172 *
173 * - drm_mode_validate_basic() performs basic sanity checks
174 * - drm_mode_validate_size() filters out modes larger than @maxX and @maxY
175 * (if specified)
 176 * - drm_mode_validate_flag() checks the modes against basic connector
 177 * capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
178 * - the optional connector ->mode_valid() helper can perform driver and/or
179 * hardware specific checks
180 *
181 * 5. Any mode whose status is not OK is pruned from the connector's modes list,
182 * accompanied by a debug message indicating the reason for the mode's
183 * rejection (see drm_mode_prune_invalid()).
184 *
185 * Returns:
186 * The number of modes found on @connector.
187 */
188int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
189 uint32_t maxX, uint32_t maxY)
132{ 190{
133 struct drm_device *dev = connector->dev; 191 struct drm_device *dev = connector->dev;
134 struct drm_display_mode *mode; 192 struct drm_display_mode *mode;
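
A minimal sketch of the wiring the kerneldoc above describes: the helper is plugged in as the generic ->fill_modes() implementation in a driver's struct drm_connector_funcs (foo_detect() is a hypothetical driver callback; dpms and destroy use stock helpers):

    static const struct drm_connector_funcs foo_connector_funcs = {
            .dpms       = drm_helper_connector_dpms,
            .detect     = foo_detect,
            .fill_modes = drm_helper_probe_single_connector_modes,
            .destroy    = drm_connector_cleanup,
    };
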
@@ -143,9 +201,11 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
143 201
144 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 202 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
145 connector->name); 203 connector->name);
146 /* set all modes to the unverified state */ 204 /* set all old modes to the stale state */
147 list_for_each_entry(mode, &connector->modes, head) 205 list_for_each_entry(mode, &connector->modes, head)
148 mode->status = MODE_UNVERIFIED; 206 mode->status = MODE_STALE;
207
208 old_status = connector->status;
149 209
150 if (connector->force) { 210 if (connector->force) {
151 if (connector->force == DRM_FORCE_ON || 211 if (connector->force == DRM_FORCE_ON ||
@@ -156,33 +216,32 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
156 if (connector->funcs->force) 216 if (connector->funcs->force)
157 connector->funcs->force(connector); 217 connector->funcs->force(connector);
158 } else { 218 } else {
159 old_status = connector->status;
160
161 connector->status = connector->funcs->detect(connector, true); 219 connector->status = connector->funcs->detect(connector, true);
220 }
221
222 /*
223 * Normally either the driver's hpd code or the poll loop should
224 * pick up any changes and fire the hotplug event. But if
225 * userspace sneaks in a probe, we might miss a change. Hence
226 * check here, and if anything changed start the hotplug code.
227 */
228 if (old_status != connector->status) {
229 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
230 connector->base.id,
231 connector->name,
232 drm_get_connector_status_name(old_status),
233 drm_get_connector_status_name(connector->status));
162 234
163 /* 235 /*
164 * Normally either the driver's hpd code or the poll loop should 236 * The hotplug event code might call into the fb
165 * pick up any changes and fire the hotplug event. But if 237 * helpers, and so expects that we do not hold any
166 * userspace sneaks in a probe, we might miss a change. Hence 238 * locks. Fire up the poll struct instead, it will
167 * check here, and if anything changed start the hotplug code. 239 * disable itself again.
168 */ 240 */
169 if (old_status != connector->status) { 241 dev->mode_config.delayed_event = true;
170 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 242 if (dev->mode_config.poll_enabled)
171 connector->base.id, 243 schedule_delayed_work(&dev->mode_config.output_poll_work,
172 connector->name, 244 0);
173 old_status, connector->status);
174
175 /*
176 * The hotplug event code might call into the fb
177 * helpers, and so expects that we do not hold any
178 * locks. Fire up the poll struct instead, it will
179 * disable itself again.
180 */
181 dev->mode_config.delayed_event = true;
182 if (dev->mode_config.poll_enabled)
183 schedule_delayed_work(&dev->mode_config.output_poll_work,
184 0);
185 }
186 } 245 }
187 246
188 /* Re-enable polling in case the global poll config changed. */ 247 /* Re-enable polling in case the global poll config changed. */
@@ -199,17 +258,16 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
199 goto prune; 258 goto prune;
200 } 259 }
201 260
261 if (connector->override_edid) {
262 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
263
264 count = drm_add_edid_modes(connector, edid);
265 drm_edid_to_eld(connector, edid);
266 } else {
202#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE 267#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
203 count = drm_load_edid_firmware(connector); 268 count = drm_load_edid_firmware(connector);
204 if (count == 0) 269 if (count == 0)
205#endif 270#endif
206 {
207 if (connector->override_edid) {
208 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
209
210 count = drm_add_edid_modes(connector, edid);
211 drm_edid_to_eld(connector, edid);
212 } else
213 count = (*connector_funcs->get_modes)(connector); 271 count = (*connector_funcs->get_modes)(connector);
214 } 272 }
215 273
@@ -219,7 +277,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
219 if (count == 0) 277 if (count == 0)
220 goto prune; 278 goto prune;
221 279
222 drm_mode_connector_list_update(connector, merge_type_bits); 280 drm_mode_connector_list_update(connector);
223 281
224 if (connector->interlace_allowed) 282 if (connector->interlace_allowed)
225 mode_flags |= DRM_MODE_FLAG_INTERLACE; 283 mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -229,7 +287,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
229 mode_flags |= DRM_MODE_FLAG_3D_MASK; 287 mode_flags |= DRM_MODE_FLAG_3D_MASK;
230 288
231 list_for_each_entry(mode, &connector->modes, head) { 289 list_for_each_entry(mode, &connector->modes, head) {
232 mode->status = drm_mode_validate_basic(mode); 290 if (mode->status == MODE_OK)
291 mode->status = drm_mode_validate_basic(mode);
233 292
234 if (mode->status == MODE_OK) 293 if (mode->status == MODE_OK)
235 mode->status = drm_mode_validate_size(mode, maxX, maxY); 294 mode->status = drm_mode_validate_size(mode, maxX, maxY);
@@ -262,49 +321,9 @@ prune:
262 321
263 return count; 322 return count;
264} 323}
265
266/**
267 * drm_helper_probe_single_connector_modes - get complete set of display modes
268 * @connector: connector to probe
269 * @maxX: max width for modes
270 * @maxY: max height for modes
271 *
272 * Based on the helper callbacks implemented by @connector try to detect all
273 * valid modes. Modes will first be added to the connector's probed_modes list,
274 * then culled (based on validity and the @maxX, @maxY parameters) and put into
275 * the normal modes list.
276 *
277 * Intended to be use as a generic implementation of the ->fill_modes()
278 * @connector vfunc for drivers that use the crtc helpers for output mode
279 * filtering and detection.
280 *
281 * Returns:
282 * The number of modes found on @connector.
283 */
284int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
285 uint32_t maxX, uint32_t maxY)
286{
287 return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
288}
289EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); 324EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
290 325
291/** 326/**
292 * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
293 * @connector: connector to probe
294 * @maxX: max width for modes
295 * @maxY: max height for modes
296 *
297 * This operates like drm_hehlper_probe_single_connector_modes except it
298 * replaces the mode bits instead of merging them for preferred modes.
299 */
300int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
301 uint32_t maxX, uint32_t maxY)
302{
303 return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
304}
305EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
306
307/**
308 * drm_kms_helper_hotplug_event - fire off KMS hotplug events 327 * drm_kms_helper_hotplug_event - fire off KMS hotplug events
309 * @dev: drm_device whose connector state changed 328 * @dev: drm_device whose connector state changed
310 * 329 *
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 531ac4cc9756..a8e2c8603945 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -275,22 +275,23 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
275 275
276/** 276/**
277 * drm_rect_debug_print - print the rectangle information 277 * drm_rect_debug_print - print the rectangle information
278 * @prefix: prefix string
278 * @r: rectangle to print 279 * @r: rectangle to print
279 * @fixed_point: rectangle is in 16.16 fixed point format 280 * @fixed_point: rectangle is in 16.16 fixed point format
280 */ 281 */
281void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point) 282void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
282{ 283{
283 int w = drm_rect_width(r); 284 int w = drm_rect_width(r);
284 int h = drm_rect_height(r); 285 int h = drm_rect_height(r);
285 286
286 if (fixed_point) 287 if (fixed_point)
287 DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", 288 DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
288 w >> 16, ((w & 0xffff) * 15625) >> 10, 289 w >> 16, ((w & 0xffff) * 15625) >> 10,
289 h >> 16, ((h & 0xffff) * 15625) >> 10, 290 h >> 16, ((h & 0xffff) * 15625) >> 10,
290 r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10, 291 r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
291 r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10); 292 r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
292 else 293 else
293 DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); 294 DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
294} 295}
295EXPORT_SYMBOL(drm_rect_debug_print); 296EXPORT_SYMBOL(drm_rect_debug_print);
296 297
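
A minimal sketch of the updated call, matching the drm_plane_helper.c hunks above; the rectangle values are made up for illustration and are in 16.16 fixed point:

    struct drm_rect src = {
            .x1 = 0, .y1 = 0,
            .x2 = 1920 << 16, .y2 = 1080 << 16,
    };

    /* With fixed_point == true this prints e.g.
     * "src: 1920.000000x1080.000000+0.000000+0.000000" */
    drm_rect_debug_print("src: ", &src, true);
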
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 615b7e667320..0ca64106a97b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -167,47 +167,35 @@ static ssize_t status_store(struct device *device,
167{ 167{
168 struct drm_connector *connector = to_drm_connector(device); 168 struct drm_connector *connector = to_drm_connector(device);
169 struct drm_device *dev = connector->dev; 169 struct drm_device *dev = connector->dev;
170 enum drm_connector_status old_status; 170 enum drm_connector_force old_force;
171 int ret; 171 int ret;
172 172
173 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 173 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
174 if (ret) 174 if (ret)
175 return ret; 175 return ret;
176 176
177 old_status = connector->status; 177 old_force = connector->force;
178 178
179 if (sysfs_streq(buf, "detect")) { 179 if (sysfs_streq(buf, "detect"))
180 connector->force = 0; 180 connector->force = 0;
181 connector->status = connector->funcs->detect(connector, true); 181 else if (sysfs_streq(buf, "on"))
182 } else if (sysfs_streq(buf, "on")) {
183 connector->force = DRM_FORCE_ON; 182 connector->force = DRM_FORCE_ON;
184 } else if (sysfs_streq(buf, "on-digital")) { 183 else if (sysfs_streq(buf, "on-digital"))
185 connector->force = DRM_FORCE_ON_DIGITAL; 184 connector->force = DRM_FORCE_ON_DIGITAL;
186 } else if (sysfs_streq(buf, "off")) { 185 else if (sysfs_streq(buf, "off"))
187 connector->force = DRM_FORCE_OFF; 186 connector->force = DRM_FORCE_OFF;
188 } else 187 else
189 ret = -EINVAL; 188 ret = -EINVAL;
190 189
191 if (ret == 0 && connector->force) { 190 if (old_force != connector->force || !connector->force) {
192 if (connector->force == DRM_FORCE_ON || 191 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n",
193 connector->force == DRM_FORCE_ON_DIGITAL)
194 connector->status = connector_status_connected;
195 else
196 connector->status = connector_status_disconnected;
197 if (connector->funcs->force)
198 connector->funcs->force(connector);
199 }
200
201 if (old_status != connector->status) {
202 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
203 connector->base.id, 192 connector->base.id,
204 connector->name, 193 connector->name,
205 old_status, connector->status); 194 old_force, connector->force);
206 195
207 dev->mode_config.delayed_event = true; 196 connector->funcs->fill_modes(connector,
208 if (dev->mode_config.poll_enabled) 197 dev->mode_config.max_width,
209 schedule_delayed_work(&dev->mode_config.output_poll_work, 198 dev->mode_config.max_height);
210 0);
211 } 199 }
212 200
213 mutex_unlock(&dev->mode_config.mutex); 201 mutex_unlock(&dev->mode_config.mutex);
@@ -256,23 +244,29 @@ static ssize_t edid_show(struct file *filp, struct kobject *kobj,
256 struct drm_connector *connector = to_drm_connector(connector_dev); 244 struct drm_connector *connector = to_drm_connector(connector_dev);
257 unsigned char *edid; 245 unsigned char *edid;
258 size_t size; 246 size_t size;
247 ssize_t ret = 0;
259 248
249 mutex_lock(&connector->dev->mode_config.mutex);
260 if (!connector->edid_blob_ptr) 250 if (!connector->edid_blob_ptr)
261 return 0; 251 goto unlock;
262 252
263 edid = connector->edid_blob_ptr->data; 253 edid = connector->edid_blob_ptr->data;
264 size = connector->edid_blob_ptr->length; 254 size = connector->edid_blob_ptr->length;
265 if (!edid) 255 if (!edid)
266 return 0; 256 goto unlock;
267 257
268 if (off >= size) 258 if (off >= size)
269 return 0; 259 goto unlock;
270 260
271 if (off + count > size) 261 if (off + count > size)
272 count = size - off; 262 count = size - off;
273 memcpy(buf, edid + off, count); 263 memcpy(buf, edid + off, count);
274 264
275 return count; 265 ret = count;
266unlock:
267 mutex_unlock(&connector->dev->mode_config.mutex);
268
269 return ret;
276} 270}
277 271
278static ssize_t modes_show(struct device *device, 272static ssize_t modes_show(struct device *device,
@@ -283,10 +277,12 @@ static ssize_t modes_show(struct device *device,
283 struct drm_display_mode *mode; 277 struct drm_display_mode *mode;
284 int written = 0; 278 int written = 0;
285 279
280 mutex_lock(&connector->dev->mode_config.mutex);
286 list_for_each_entry(mode, &connector->modes, head) { 281 list_for_each_entry(mode, &connector->modes, head) {
287 written += snprintf(buf + written, PAGE_SIZE - written, "%s\n", 282 written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
288 mode->name); 283 mode->name);
289 } 284 }
285 mutex_unlock(&connector->dev->mode_config.mutex);
290 286
291 return written; 287 return written;
292} 288}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 96e86cf4455b..83efca941388 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -118,7 +118,7 @@ config DRM_EXYNOS_ROTATOR
118 118
119config DRM_EXYNOS_GSC 119config DRM_EXYNOS_GSC
120 bool "GScaler" 120 bool "GScaler"
121 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM 121 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !VIDEO_SAMSUNG_EXYNOS_GSC
122 help 122 help
123 Choose this option if you want to use Exynos GSC for DRM. 123 Choose this option if you want to use Exynos GSC for DRM.
124 124
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index fbe1b3174f75..c7362b99ce28 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -21,11 +21,11 @@
21 21
22#include "exynos_drm_drv.h" 22#include "exynos_drm_drv.h"
23#include "exynos_drm_crtc.h" 23#include "exynos_drm_crtc.h"
24#include "exynos_drm_fb.h"
24#include "exynos_drm_plane.h" 25#include "exynos_drm_plane.h"
25#include "exynos_drm_iommu.h" 26#include "exynos_drm_iommu.h"
26 27
27#define WINDOWS_NR 3 28#define WINDOWS_NR 3
28#define CURSOR_WIN 2
29#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 29#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
30 30
31static const char * const decon_clks_name[] = { 31static const char * const decon_clks_name[] = {
@@ -56,6 +56,7 @@ struct decon_context {
56 struct drm_device *drm_dev; 56 struct drm_device *drm_dev;
57 struct exynos_drm_crtc *crtc; 57 struct exynos_drm_crtc *crtc;
58 struct exynos_drm_plane planes[WINDOWS_NR]; 58 struct exynos_drm_plane planes[WINDOWS_NR];
59 struct exynos_drm_plane_config configs[WINDOWS_NR];
59 void __iomem *addr; 60 void __iomem *addr;
60 struct clk *clks[ARRAY_SIZE(decon_clks_name)]; 61 struct clk *clks[ARRAY_SIZE(decon_clks_name)];
61 int pipe; 62 int pipe;
@@ -71,6 +72,12 @@ static const uint32_t decon_formats[] = {
71 DRM_FORMAT_ARGB8888, 72 DRM_FORMAT_ARGB8888,
72}; 73};
73 74
75static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
76 DRM_PLANE_TYPE_PRIMARY,
77 DRM_PLANE_TYPE_OVERLAY,
78 DRM_PLANE_TYPE_CURSOR,
79};
80
74static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask, 81static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
75 u32 val) 82 u32 val)
76{ 83{
@@ -259,21 +266,24 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
259static void decon_update_plane(struct exynos_drm_crtc *crtc, 266static void decon_update_plane(struct exynos_drm_crtc *crtc,
260 struct exynos_drm_plane *plane) 267 struct exynos_drm_plane *plane)
261{ 268{
269 struct exynos_drm_plane_state *state =
270 to_exynos_plane_state(plane->base.state);
262 struct decon_context *ctx = crtc->ctx; 271 struct decon_context *ctx = crtc->ctx;
263 struct drm_plane_state *state = plane->base.state; 272 struct drm_framebuffer *fb = state->base.fb;
264 unsigned int win = plane->zpos; 273 unsigned int win = plane->zpos;
265 unsigned int bpp = state->fb->bits_per_pixel >> 3; 274 unsigned int bpp = fb->bits_per_pixel >> 3;
266 unsigned int pitch = state->fb->pitches[0]; 275 unsigned int pitch = fb->pitches[0];
276 dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
267 u32 val; 277 u32 val;
268 278
269 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 279 if (test_bit(BIT_SUSPENDED, &ctx->flags))
270 return; 280 return;
271 281
272 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); 282 val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
273 writel(val, ctx->addr + DECON_VIDOSDxA(win)); 283 writel(val, ctx->addr + DECON_VIDOSDxA(win));
274 284
275 val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) | 285 val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
276 COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1); 286 COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
277 writel(val, ctx->addr + DECON_VIDOSDxB(win)); 287 writel(val, ctx->addr + DECON_VIDOSDxB(win));
278 288
279 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | 289 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -284,20 +294,20 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
284 VIDOSD_Wx_ALPHA_B_F(0x0); 294 VIDOSD_Wx_ALPHA_B_F(0x0);
285 writel(val, ctx->addr + DECON_VIDOSDxD(win)); 295 writel(val, ctx->addr + DECON_VIDOSDxD(win));
286 296
287 writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win)); 297 writel(dma_addr, ctx->addr + DECON_VIDW0xADD0B0(win));
288 298
289 val = plane->dma_addr[0] + pitch * plane->crtc_h; 299 val = dma_addr + pitch * state->src.h;
290 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); 300 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
291 301
292 if (ctx->out_type != IFTYPE_HDMI) 302 if (ctx->out_type != IFTYPE_HDMI)
293 val = BIT_VAL(pitch - plane->crtc_w * bpp, 27, 14) 303 val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
294 | BIT_VAL(plane->crtc_w * bpp, 13, 0); 304 | BIT_VAL(state->crtc.w * bpp, 13, 0);
295 else 305 else
296 val = BIT_VAL(pitch - plane->crtc_w * bpp, 29, 15) 306 val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
297 | BIT_VAL(plane->crtc_w * bpp, 14, 0); 307 | BIT_VAL(state->crtc.w * bpp, 14, 0);
298 writel(val, ctx->addr + DECON_VIDW0xADD2(win)); 308 writel(val, ctx->addr + DECON_VIDW0xADD2(win));
299 309
300 decon_win_set_pixfmt(ctx, win, state->fb); 310 decon_win_set_pixfmt(ctx, win, fb);
301 311
302 /* window enable */ 312 /* window enable */
303 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 313 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
@@ -377,20 +387,12 @@ static void decon_swreset(struct decon_context *ctx)
377static void decon_enable(struct exynos_drm_crtc *crtc) 387static void decon_enable(struct exynos_drm_crtc *crtc)
378{ 388{
379 struct decon_context *ctx = crtc->ctx; 389 struct decon_context *ctx = crtc->ctx;
380 int ret;
381 int i;
382 390
383 if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags)) 391 if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags))
384 return; 392 return;
385 393
386 pm_runtime_get_sync(ctx->dev); 394 pm_runtime_get_sync(ctx->dev);
387 395
388 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
389 ret = clk_prepare_enable(ctx->clks[i]);
390 if (ret < 0)
391 goto err;
392 }
393
394 set_bit(BIT_CLKS_ENABLED, &ctx->flags); 396 set_bit(BIT_CLKS_ENABLED, &ctx->flags);
395 397
396 /* if vblank was enabled status, enable it again. */ 398 /* if vblank was enabled status, enable it again. */
@@ -399,11 +401,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
399 401
400 decon_commit(ctx->crtc); 402 decon_commit(ctx->crtc);
401 403
402 return;
403err:
404 while (--i >= 0)
405 clk_disable_unprepare(ctx->clks[i]);
406
407 set_bit(BIT_SUSPENDED, &ctx->flags); 404 set_bit(BIT_SUSPENDED, &ctx->flags);
408} 405}
409 406
@@ -425,9 +422,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
425 422
426 decon_swreset(ctx); 423 decon_swreset(ctx);
427 424
428 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
429 clk_disable_unprepare(ctx->clks[i]);
430
431 clear_bit(BIT_CLKS_ENABLED, &ctx->flags); 425 clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
432 426
433 pm_runtime_put_sync(ctx->dev); 427 pm_runtime_put_sync(ctx->dev);
@@ -478,7 +472,6 @@ err:
478static struct exynos_drm_crtc_ops decon_crtc_ops = { 472static struct exynos_drm_crtc_ops decon_crtc_ops = {
479 .enable = decon_enable, 473 .enable = decon_enable,
480 .disable = decon_disable, 474 .disable = decon_disable,
481 .commit = decon_commit,
482 .enable_vblank = decon_enable_vblank, 475 .enable_vblank = decon_enable_vblank,
483 .disable_vblank = decon_disable_vblank, 476 .disable_vblank = decon_disable_vblank,
484 .atomic_begin = decon_atomic_begin, 477 .atomic_begin = decon_atomic_begin,
@@ -495,7 +488,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
495 struct exynos_drm_private *priv = drm_dev->dev_private; 488 struct exynos_drm_private *priv = drm_dev->dev_private;
496 struct exynos_drm_plane *exynos_plane; 489 struct exynos_drm_plane *exynos_plane;
497 enum exynos_drm_output_type out_type; 490 enum exynos_drm_output_type out_type;
498 enum drm_plane_type type;
499 unsigned int win; 491 unsigned int win;
500 int ret; 492 int ret;
501 493
@@ -505,10 +497,13 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
505 for (win = ctx->first_win; win < WINDOWS_NR; win++) { 497 for (win = ctx->first_win; win < WINDOWS_NR; win++) {
506 int tmp = (win == ctx->first_win) ? 0 : win; 498 int tmp = (win == ctx->first_win) ? 0 : win;
507 499
508 type = exynos_plane_get_type(tmp, CURSOR_WIN); 500 ctx->configs[win].pixel_formats = decon_formats;
501 ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
502 ctx->configs[win].zpos = win;
503 ctx->configs[win].type = decon_win_types[tmp];
504
509 ret = exynos_plane_init(drm_dev, &ctx->planes[win], 505 ret = exynos_plane_init(drm_dev, &ctx->planes[win],
510 1 << ctx->pipe, type, decon_formats, 506 1 << ctx->pipe, &ctx->configs[win]);
511 ARRAY_SIZE(decon_formats), win);
512 if (ret) 507 if (ret)
513 return ret; 508 return ret;
514 } 509 }
@@ -581,6 +576,44 @@ out:
581 return IRQ_HANDLED; 576 return IRQ_HANDLED;
582} 577}
583 578
579#ifdef CONFIG_PM
580static int exynos5433_decon_suspend(struct device *dev)
581{
582 struct decon_context *ctx = dev_get_drvdata(dev);
583 int i;
584
585 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
586 clk_disable_unprepare(ctx->clks[i]);
587
588 return 0;
589}
590
591static int exynos5433_decon_resume(struct device *dev)
592{
593 struct decon_context *ctx = dev_get_drvdata(dev);
594 int i, ret;
595
596 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
597 ret = clk_prepare_enable(ctx->clks[i]);
598 if (ret < 0)
599 goto err;
600 }
601
602 return 0;
603
604err:
605 while (--i >= 0)
606 clk_disable_unprepare(ctx->clks[i]);
607
608 return ret;
609}
610#endif
611
612static const struct dev_pm_ops exynos5433_decon_pm_ops = {
613 SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
614 NULL)
615};
616
584static const struct of_device_id exynos5433_decon_driver_dt_match[] = { 617static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
585 { 618 {
586 .compatible = "samsung,exynos5433-decon", 619 .compatible = "samsung,exynos5433-decon",
@@ -684,6 +717,7 @@ struct platform_driver exynos5433_decon_driver = {
684 .remove = exynos5433_decon_remove, 717 .remove = exynos5433_decon_remove,
685 .driver = { 718 .driver = {
686 .name = "exynos5433-decon", 719 .name = "exynos5433-decon",
720 .pm = &exynos5433_decon_pm_ops,
687 .of_match_table = exynos5433_decon_driver_dt_match, 721 .of_match_table = exynos5433_decon_driver_dt_match,
688 }, 722 },
689}; 723};
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index ead2b16e237d..c47f9af8170b 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,6 +30,7 @@
30#include "exynos_drm_crtc.h" 30#include "exynos_drm_crtc.h"
31#include "exynos_drm_plane.h" 31#include "exynos_drm_plane.h"
32#include "exynos_drm_drv.h" 32#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h"
33#include "exynos_drm_fbdev.h" 34#include "exynos_drm_fbdev.h"
34#include "exynos_drm_iommu.h" 35#include "exynos_drm_iommu.h"
35 36
@@ -40,13 +41,13 @@
40#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 41#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
41 42
42#define WINDOWS_NR 2 43#define WINDOWS_NR 2
43#define CURSOR_WIN 1
44 44
45struct decon_context { 45struct decon_context {
46 struct device *dev; 46 struct device *dev;
47 struct drm_device *drm_dev; 47 struct drm_device *drm_dev;
48 struct exynos_drm_crtc *crtc; 48 struct exynos_drm_crtc *crtc;
49 struct exynos_drm_plane planes[WINDOWS_NR]; 49 struct exynos_drm_plane planes[WINDOWS_NR];
50 struct exynos_drm_plane_config configs[WINDOWS_NR];
50 struct clk *pclk; 51 struct clk *pclk;
51 struct clk *aclk; 52 struct clk *aclk;
52 struct clk *eclk; 53 struct clk *eclk;
@@ -81,6 +82,11 @@ static const uint32_t decon_formats[] = {
81 DRM_FORMAT_BGRA8888, 82 DRM_FORMAT_BGRA8888,
82}; 83};
83 84
85static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
86 DRM_PLANE_TYPE_PRIMARY,
87 DRM_PLANE_TYPE_CURSOR,
88};
89
84static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) 90static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
85{ 91{
86 struct decon_context *ctx = crtc->ctx; 92 struct decon_context *ctx = crtc->ctx;
@@ -119,13 +125,8 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
119 } 125 }
120 126
121 /* Wait for vsync, as disable channel takes effect at next vsync */ 127 /* Wait for vsync, as disable channel takes effect at next vsync */
122 if (ch_enabled) { 128 if (ch_enabled)
123 unsigned int state = ctx->suspended;
124
125 ctx->suspended = 0;
126 decon_wait_for_vblank(ctx->crtc); 129 decon_wait_for_vblank(ctx->crtc);
127 ctx->suspended = state;
128 }
129} 130}
130 131
131static int decon_ctx_initialize(struct decon_context *ctx, 132static int decon_ctx_initialize(struct decon_context *ctx,
@@ -398,16 +399,17 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
398static void decon_update_plane(struct exynos_drm_crtc *crtc, 399static void decon_update_plane(struct exynos_drm_crtc *crtc,
399 struct exynos_drm_plane *plane) 400 struct exynos_drm_plane *plane)
400{ 401{
402 struct exynos_drm_plane_state *state =
403 to_exynos_plane_state(plane->base.state);
401 struct decon_context *ctx = crtc->ctx; 404 struct decon_context *ctx = crtc->ctx;
402 struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; 405 struct drm_framebuffer *fb = state->base.fb;
403 struct drm_plane_state *state = plane->base.state;
404 int padding; 406 int padding;
405 unsigned long val, alpha; 407 unsigned long val, alpha;
406 unsigned int last_x; 408 unsigned int last_x;
407 unsigned int last_y; 409 unsigned int last_y;
408 unsigned int win = plane->zpos; 410 unsigned int win = plane->zpos;
409 unsigned int bpp = state->fb->bits_per_pixel >> 3; 411 unsigned int bpp = fb->bits_per_pixel >> 3;
410 unsigned int pitch = state->fb->pitches[0]; 412 unsigned int pitch = fb->pitches[0];
411 413
412 if (ctx->suspended) 414 if (ctx->suspended)
413 return; 415 return;
@@ -423,41 +425,32 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
423 */ 425 */
424 426
425 /* buffer start address */ 427 /* buffer start address */
426 val = (unsigned long)plane->dma_addr[0]; 428 val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
427 writel(val, ctx->regs + VIDW_BUF_START(win)); 429 writel(val, ctx->regs + VIDW_BUF_START(win));
428 430
429 padding = (pitch / bpp) - state->fb->width; 431 padding = (pitch / bpp) - fb->width;
430 432
431 /* buffer size */ 433 /* buffer size */
432 writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win)); 434 writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
433 writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win)); 435 writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win));
434 436
435 /* offset from the start of the buffer to read */ 437 /* offset from the start of the buffer to read */
436 writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); 438 writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
437 writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win)); 439 writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
438 440
439 DRM_DEBUG_KMS("start addr = 0x%lx\n", 441 DRM_DEBUG_KMS("start addr = 0x%lx\n",
440 (unsigned long)val); 442 (unsigned long)val);
441 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 443 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
442 plane->crtc_w, plane->crtc_h); 444 state->crtc.w, state->crtc.h);
443 445
444 /* 446 val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
445 * OSD position. 447 VIDOSDxA_TOPLEFT_Y(state->crtc.y);
446 * In case the window layout goes of LCD layout, DECON fails.
447 */
448 if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
449 plane->crtc_x = mode->hdisplay - plane->crtc_w;
450 if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
451 plane->crtc_y = mode->vdisplay - plane->crtc_h;
452
453 val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
454 VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
455 writel(val, ctx->regs + VIDOSD_A(win)); 448 writel(val, ctx->regs + VIDOSD_A(win));
456 449
457 last_x = plane->crtc_x + plane->crtc_w; 450 last_x = state->crtc.x + state->crtc.w;
458 if (last_x) 451 if (last_x)
459 last_x--; 452 last_x--;
460 last_y = plane->crtc_y + plane->crtc_h; 453 last_y = state->crtc.y + state->crtc.h;
461 if (last_y) 454 if (last_y)
462 last_y--; 455 last_y--;
463 456
@@ -466,7 +459,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
466 writel(val, ctx->regs + VIDOSD_B(win)); 459 writel(val, ctx->regs + VIDOSD_B(win));
467 460
468 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 461 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
469 plane->crtc_x, plane->crtc_y, last_x, last_y); 462 state->crtc.x, state->crtc.y, last_x, last_y);
470 463
471 /* OSD alpha */ 464 /* OSD alpha */
472 alpha = VIDOSDxC_ALPHA0_R_F(0x0) | 465 alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
@@ -481,7 +474,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
481 474
482 writel(alpha, ctx->regs + VIDOSD_D(win)); 475 writel(alpha, ctx->regs + VIDOSD_D(win));
483 476
484 decon_win_set_pixfmt(ctx, win, state->fb); 477 decon_win_set_pixfmt(ctx, win, fb);
485 478
486 /* hardware window 0 doesn't support color key. */ 479 /* hardware window 0 doesn't support color key. */
487 if (win != 0) 480 if (win != 0)
@@ -555,39 +548,12 @@ static void decon_init(struct decon_context *ctx)
555static void decon_enable(struct exynos_drm_crtc *crtc) 548static void decon_enable(struct exynos_drm_crtc *crtc)
556{ 549{
557 struct decon_context *ctx = crtc->ctx; 550 struct decon_context *ctx = crtc->ctx;
558 int ret;
559 551
560 if (!ctx->suspended) 552 if (!ctx->suspended)
561 return; 553 return;
562 554
563 ctx->suspended = false;
564
565 pm_runtime_get_sync(ctx->dev); 555 pm_runtime_get_sync(ctx->dev);
566 556
567 ret = clk_prepare_enable(ctx->pclk);
568 if (ret < 0) {
569 DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
570 return;
571 }
572
573 ret = clk_prepare_enable(ctx->aclk);
574 if (ret < 0) {
575 DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
576 return;
577 }
578
579 ret = clk_prepare_enable(ctx->eclk);
580 if (ret < 0) {
581 DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
582 return;
583 }
584
585 ret = clk_prepare_enable(ctx->vclk);
586 if (ret < 0) {
587 DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
588 return;
589 }
590
591 decon_init(ctx); 557 decon_init(ctx);
592 558
593 /* if vblank was enabled status, enable it again. */ 559 /* if vblank was enabled status, enable it again. */
@@ -595,6 +561,8 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
595 decon_enable_vblank(ctx->crtc); 561 decon_enable_vblank(ctx->crtc);
596 562
597 decon_commit(ctx->crtc); 563 decon_commit(ctx->crtc);
564
565 ctx->suspended = false;
598} 566}
599 567
600static void decon_disable(struct exynos_drm_crtc *crtc) 568static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -613,11 +581,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
613 for (i = 0; i < WINDOWS_NR; i++) 581 for (i = 0; i < WINDOWS_NR; i++)
614 decon_disable_plane(crtc, &ctx->planes[i]); 582 decon_disable_plane(crtc, &ctx->planes[i]);
615 583
616 clk_disable_unprepare(ctx->vclk);
617 clk_disable_unprepare(ctx->eclk);
618 clk_disable_unprepare(ctx->aclk);
619 clk_disable_unprepare(ctx->pclk);
620
621 pm_runtime_put_sync(ctx->dev); 584 pm_runtime_put_sync(ctx->dev);
622 585
623 ctx->suspended = true; 586 ctx->suspended = true;
@@ -679,8 +642,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
679 struct decon_context *ctx = dev_get_drvdata(dev); 642 struct decon_context *ctx = dev_get_drvdata(dev);
680 struct drm_device *drm_dev = data; 643 struct drm_device *drm_dev = data;
681 struct exynos_drm_plane *exynos_plane; 644 struct exynos_drm_plane *exynos_plane;
682 enum drm_plane_type type; 645 unsigned int i;
683 unsigned int zpos;
684 int ret; 646 int ret;
685 647
686 ret = decon_ctx_initialize(ctx, drm_dev); 648 ret = decon_ctx_initialize(ctx, drm_dev);
@@ -689,11 +651,14 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
689 return ret; 651 return ret;
690 } 652 }
691 653
692 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 654 for (i = 0; i < WINDOWS_NR; i++) {
693 type = exynos_plane_get_type(zpos, CURSOR_WIN); 655 ctx->configs[i].pixel_formats = decon_formats;
694 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 656 ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats);
695 1 << ctx->pipe, type, decon_formats, 657 ctx->configs[i].zpos = i;
696 ARRAY_SIZE(decon_formats), zpos); 658 ctx->configs[i].type = decon_win_types[i];
659
660 ret = exynos_plane_init(drm_dev, &ctx->planes[i],
661 1 << ctx->pipe, &ctx->configs[i]);
697 if (ret) 662 if (ret)
698 return ret; 663 return ret;
699 } 664 }
@@ -843,11 +808,63 @@ static int decon_remove(struct platform_device *pdev)
843 return 0; 808 return 0;
844} 809}
845 810
811#ifdef CONFIG_PM
812static int exynos7_decon_suspend(struct device *dev)
813{
814 struct decon_context *ctx = dev_get_drvdata(dev);
815
816 clk_disable_unprepare(ctx->vclk);
817 clk_disable_unprepare(ctx->eclk);
818 clk_disable_unprepare(ctx->aclk);
819 clk_disable_unprepare(ctx->pclk);
820
821 return 0;
822}
823
824static int exynos7_decon_resume(struct device *dev)
825{
826 struct decon_context *ctx = dev_get_drvdata(dev);
827 int ret;
828
829 ret = clk_prepare_enable(ctx->pclk);
830 if (ret < 0) {
831 DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
832 return ret;
833 }
834
835 ret = clk_prepare_enable(ctx->aclk);
836 if (ret < 0) {
837 DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
838 return ret;
839 }
840
841 ret = clk_prepare_enable(ctx->eclk);
842 if (ret < 0) {
843 DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
844 return ret;
845 }
846
847 ret = clk_prepare_enable(ctx->vclk);
848 if (ret < 0) {
849 DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
850 return ret;
851 }
852
853 return 0;
854}
855#endif
856
857static const struct dev_pm_ops exynos7_decon_pm_ops = {
858 SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
859 NULL)
860};
861
846struct platform_driver decon_driver = { 862struct platform_driver decon_driver = {
847 .probe = decon_probe, 863 .probe = decon_probe,
848 .remove = decon_remove, 864 .remove = decon_remove,
849 .driver = { 865 .driver = {
850 .name = "exynos-decon", 866 .name = "exynos-decon",
867 .pm = &exynos7_decon_pm_ops,
851 .of_match_table = decon_driver_dt_match, 868 .of_match_table = decon_driver_dt_match,
852 }, 869 },
853}; 870};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 124fb9a56f02..793e4977fcf7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1009,9 +1009,9 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
1009{ 1009{
1010 int ret; 1010 int ret;
1011 1011
1012 encoder->bridge = dp->bridge; 1012 encoder->bridge->next = dp->ptn_bridge;
1013 dp->bridge->encoder = encoder; 1013 dp->ptn_bridge->encoder = encoder;
1014 ret = drm_bridge_attach(encoder->dev, dp->bridge); 1014 ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
1015 if (ret) { 1015 if (ret) {
1016 DRM_ERROR("Failed to attach bridge to drm\n"); 1016 DRM_ERROR("Failed to attach bridge to drm\n");
1017 return ret; 1017 return ret;
@@ -1020,14 +1020,15 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
1023static int exynos_dp_create_connector(struct drm_encoder *encoder) 1023static int exynos_dp_bridge_attach(struct drm_bridge *bridge)
1024{ 1024{
1025 struct exynos_dp_device *dp = encoder_to_dp(encoder); 1025 struct exynos_dp_device *dp = bridge->driver_private;
1026 struct drm_encoder *encoder = &dp->encoder;
1026 struct drm_connector *connector = &dp->connector; 1027 struct drm_connector *connector = &dp->connector;
1027 int ret; 1028 int ret;
1028 1029
1029 /* Pre-empt DP connector creation if there's a bridge */ 1030 /* Pre-empt DP connector creation if there's a bridge */
1030 if (dp->bridge) { 1031 if (dp->ptn_bridge) {
1031 ret = exynos_drm_attach_lcd_bridge(dp, encoder); 1032 ret = exynos_drm_attach_lcd_bridge(dp, encoder);
1032 if (!ret) 1033 if (!ret)
1033 return 0; 1034 return 0;
@@ -1052,27 +1053,16 @@ static int exynos_dp_create_connector(struct drm_encoder *encoder)
1052 return ret; 1053 return ret;
1053} 1054}
1054 1055
1055static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, 1056static void exynos_dp_bridge_enable(struct drm_bridge *bridge)
1056 const struct drm_display_mode *mode,
1057 struct drm_display_mode *adjusted_mode)
1058{
1059 return true;
1060}
1061
1062static void exynos_dp_mode_set(struct drm_encoder *encoder,
1063 struct drm_display_mode *mode,
1064 struct drm_display_mode *adjusted_mode)
1065{
1066}
1067
1068static void exynos_dp_enable(struct drm_encoder *encoder)
1069{ 1057{
1070 struct exynos_dp_device *dp = encoder_to_dp(encoder); 1058 struct exynos_dp_device *dp = bridge->driver_private;
1071 struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1059 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1072 1060
1073 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1061 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1074 return; 1062 return;
1075 1063
1064 pm_runtime_get_sync(dp->dev);
1065
1076 if (dp->panel) { 1066 if (dp->panel) {
1077 if (drm_panel_prepare(dp->panel)) { 1067 if (drm_panel_prepare(dp->panel)) {
1078 DRM_ERROR("failed to setup the panel\n"); 1068 DRM_ERROR("failed to setup the panel\n");
@@ -1083,7 +1073,6 @@ static void exynos_dp_enable(struct drm_encoder *encoder)
1083 if (crtc->ops->clock_enable) 1073 if (crtc->ops->clock_enable)
1084 crtc->ops->clock_enable(dp_to_crtc(dp), true); 1074 crtc->ops->clock_enable(dp_to_crtc(dp), true);
1085 1075
1086 clk_prepare_enable(dp->clock);
1087 phy_power_on(dp->phy); 1076 phy_power_on(dp->phy);
1088 exynos_dp_init_dp(dp); 1077 exynos_dp_init_dp(dp);
1089 enable_irq(dp->irq); 1078 enable_irq(dp->irq);
@@ -1092,9 +1081,9 @@ static void exynos_dp_enable(struct drm_encoder *encoder)
1092 dp->dpms_mode = DRM_MODE_DPMS_ON; 1081 dp->dpms_mode = DRM_MODE_DPMS_ON;
1093} 1082}
1094 1083
1095static void exynos_dp_disable(struct drm_encoder *encoder) 1084static void exynos_dp_bridge_disable(struct drm_bridge *bridge)
1096{ 1085{
1097 struct exynos_dp_device *dp = encoder_to_dp(encoder); 1086 struct exynos_dp_device *dp = bridge->driver_private;
1098 struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1087 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1099 1088
1100 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1089 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1110,7 +1099,6 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
1110 disable_irq(dp->irq); 1099 disable_irq(dp->irq);
1111 flush_work(&dp->hotplug_work); 1100 flush_work(&dp->hotplug_work);
1112 phy_power_off(dp->phy); 1101 phy_power_off(dp->phy);
1113 clk_disable_unprepare(dp->clock);
1114 1102
1115 if (crtc->ops->clock_enable) 1103 if (crtc->ops->clock_enable)
1116 crtc->ops->clock_enable(dp_to_crtc(dp), false); 1104 crtc->ops->clock_enable(dp_to_crtc(dp), false);
@@ -1120,9 +1108,74 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
1120 DRM_ERROR("failed to turnoff the panel\n"); 1108 DRM_ERROR("failed to turnoff the panel\n");
1121 } 1109 }
1122 1110
1111 pm_runtime_put_sync(dp->dev);
1112
1123 dp->dpms_mode = DRM_MODE_DPMS_OFF; 1113 dp->dpms_mode = DRM_MODE_DPMS_OFF;
1124} 1114}
1125 1115
1116static void exynos_dp_bridge_nop(struct drm_bridge *bridge)
1117{
1118 /* do nothing */
1119}
1120
1121static const struct drm_bridge_funcs exynos_dp_bridge_funcs = {
1122 .enable = exynos_dp_bridge_enable,
1123 .disable = exynos_dp_bridge_disable,
1124 .pre_enable = exynos_dp_bridge_nop,
1125 .post_disable = exynos_dp_bridge_nop,
1126 .attach = exynos_dp_bridge_attach,
1127};
1128
1129static int exynos_dp_create_connector(struct drm_encoder *encoder)
1130{
1131 struct exynos_dp_device *dp = encoder_to_dp(encoder);
1132 struct drm_device *drm_dev = dp->drm_dev;
1133 struct drm_bridge *bridge;
1134 int ret;
1135
1136 bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
1137 if (!bridge) {
1138 DRM_ERROR("failed to allocate for drm bridge\n");
1139 return -ENOMEM;
1140 }
1141
1142 dp->bridge = bridge;
1143
1144 encoder->bridge = bridge;
1145 bridge->driver_private = dp;
1146 bridge->encoder = encoder;
1147 bridge->funcs = &exynos_dp_bridge_funcs;
1148
1149 ret = drm_bridge_attach(drm_dev, bridge);
1150 if (ret) {
1151 DRM_ERROR("failed to attach drm bridge\n");
1152 return -EINVAL;
1153 }
1154
1155 return 0;
1156}
1157
1158static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
1159 const struct drm_display_mode *mode,
1160 struct drm_display_mode *adjusted_mode)
1161{
1162 return true;
1163}
1164
1165static void exynos_dp_mode_set(struct drm_encoder *encoder,
1166 struct drm_display_mode *mode,
1167 struct drm_display_mode *adjusted_mode)
1168{
1169}
1170
1171static void exynos_dp_enable(struct drm_encoder *encoder)
1172{
1173}
1174
1175static void exynos_dp_disable(struct drm_encoder *encoder)
1176{
1177}
1178
1126static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { 1179static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
1127 .mode_fixup = exynos_dp_mode_fixup, 1180 .mode_fixup = exynos_dp_mode_fixup,
1128 .mode_set = exynos_dp_mode_set, 1181 .mode_set = exynos_dp_mode_set,
@@ -1238,7 +1291,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1238 } 1291 }
1239 } 1292 }
1240 1293
1241 if (!dp->panel && !dp->bridge) { 1294 if (!dp->panel && !dp->ptn_bridge) {
1242 ret = exynos_dp_dt_parse_panel(dp); 1295 ret = exynos_dp_dt_parse_panel(dp);
1243 if (ret) 1296 if (ret)
1244 return ret; 1297 return ret;
@@ -1289,10 +1342,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1289 1342
1290 INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); 1343 INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
1291 1344
1292 phy_power_on(dp->phy);
1293
1294 exynos_dp_init_dp(dp);
1295
1296 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 1345 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
1297 irq_flags, "exynos-dp", dp); 1346 irq_flags, "exynos-dp", dp);
1298 if (ret) { 1347 if (ret) {
@@ -1313,7 +1362,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1313 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1362 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1314 1363
1315 drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, 1364 drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
1316 DRM_MODE_ENCODER_TMDS); 1365 DRM_MODE_ENCODER_TMDS, NULL);
1317 1366
1318 drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); 1367 drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
1319 1368
@@ -1343,8 +1392,9 @@ static const struct component_ops exynos_dp_ops = {
1343static int exynos_dp_probe(struct platform_device *pdev) 1392static int exynos_dp_probe(struct platform_device *pdev)
1344{ 1393{
1345 struct device *dev = &pdev->dev; 1394 struct device *dev = &pdev->dev;
1346 struct device_node *panel_node, *bridge_node, *endpoint; 1395 struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
1347 struct exynos_dp_device *dp; 1396 struct exynos_dp_device *dp;
1397 int ret;
1348 1398
1349 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), 1399 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
1350 GFP_KERNEL); 1400 GFP_KERNEL);
@@ -1353,36 +1403,96 @@ static int exynos_dp_probe(struct platform_device *pdev)
1353 1403
1354 platform_set_drvdata(pdev, dp); 1404 platform_set_drvdata(pdev, dp);
1355 1405
1406 /* This is for the backward compatibility. */
1356 panel_node = of_parse_phandle(dev->of_node, "panel", 0); 1407 panel_node = of_parse_phandle(dev->of_node, "panel", 0);
1357 if (panel_node) { 1408 if (panel_node) {
1358 dp->panel = of_drm_find_panel(panel_node); 1409 dp->panel = of_drm_find_panel(panel_node);
1359 of_node_put(panel_node); 1410 of_node_put(panel_node);
1360 if (!dp->panel) 1411 if (!dp->panel)
1361 return -EPROBE_DEFER; 1412 return -EPROBE_DEFER;
1413 } else {
1414 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1415 if (endpoint) {
1416 panel_node = of_graph_get_remote_port_parent(endpoint);
1417 if (panel_node) {
1418 dp->panel = of_drm_find_panel(panel_node);
1419 of_node_put(panel_node);
1420 if (!dp->panel)
1421 return -EPROBE_DEFER;
1422 } else {
1423 DRM_ERROR("no port node for panel device.\n");
1424 return -EINVAL;
1425 }
1426 }
1362 } 1427 }
1363 1428
1429 if (endpoint)
1430 goto out;
1431
1364 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 1432 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1365 if (endpoint) { 1433 if (endpoint) {
1366 bridge_node = of_graph_get_remote_port_parent(endpoint); 1434 bridge_node = of_graph_get_remote_port_parent(endpoint);
1367 if (bridge_node) { 1435 if (bridge_node) {
1368 dp->bridge = of_drm_find_bridge(bridge_node); 1436 dp->ptn_bridge = of_drm_find_bridge(bridge_node);
1369 of_node_put(bridge_node); 1437 of_node_put(bridge_node);
1370 if (!dp->bridge) 1438 if (!dp->ptn_bridge)
1371 return -EPROBE_DEFER; 1439 return -EPROBE_DEFER;
1372 } else 1440 } else
1373 return -EPROBE_DEFER; 1441 return -EPROBE_DEFER;
1374 } 1442 }
1375 1443
1376 return component_add(&pdev->dev, &exynos_dp_ops); 1444out:
1445 pm_runtime_enable(dev);
1446
1447 ret = component_add(&pdev->dev, &exynos_dp_ops);
1448 if (ret)
1449 goto err_disable_pm_runtime;
1450
1451 return ret;
1452
1453err_disable_pm_runtime:
1454 pm_runtime_disable(dev);
1455
1456 return ret;
1377} 1457}
1378 1458
1379static int exynos_dp_remove(struct platform_device *pdev) 1459static int exynos_dp_remove(struct platform_device *pdev)
1380{ 1460{
1461 pm_runtime_disable(&pdev->dev);
1381 component_del(&pdev->dev, &exynos_dp_ops); 1462 component_del(&pdev->dev, &exynos_dp_ops);
1382 1463
1383 return 0; 1464 return 0;
1384} 1465}
1385 1466
1467#ifdef CONFIG_PM
1468static int exynos_dp_suspend(struct device *dev)
1469{
1470 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1471
1472 clk_disable_unprepare(dp->clock);
1473
1474 return 0;
1475}
1476
1477static int exynos_dp_resume(struct device *dev)
1478{
1479 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1480 int ret;
1481
1482 ret = clk_prepare_enable(dp->clock);
1483 if (ret < 0) {
1484 DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
1485 return ret;
1486 }
1487
1488 return 0;
1489}
1490#endif
1491
1492static const struct dev_pm_ops exynos_dp_pm_ops = {
1493 SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
1494};
1495
1386static const struct of_device_id exynos_dp_match[] = { 1496static const struct of_device_id exynos_dp_match[] = {
1387 { .compatible = "samsung,exynos5-dp" }, 1497 { .compatible = "samsung,exynos5-dp" },
1388 {}, 1498 {},
@@ -1395,6 +1505,7 @@ struct platform_driver dp_driver = {
1395 .driver = { 1505 .driver = {
1396 .name = "exynos-dp", 1506 .name = "exynos-dp",
1397 .owner = THIS_MODULE, 1507 .owner = THIS_MODULE,
1508 .pm = &exynos_dp_pm_ops,
1398 .of_match_table = exynos_dp_match, 1509 .of_match_table = exynos_dp_match,
1399 }, 1510 },
1400}; 1511};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index e413b6f7b0e7..66eec4b2d5c6 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -153,6 +153,7 @@ struct exynos_dp_device {
153 struct drm_connector connector; 153 struct drm_connector connector;
154 struct drm_panel *panel; 154 struct drm_panel *panel;
155 struct drm_bridge *bridge; 155 struct drm_bridge *bridge;
156 struct drm_bridge *ptn_bridge;
156 struct clk *clock; 157 struct clk *clock;
157 unsigned int irq; 158 unsigned int irq;
158 void __iomem *reg_base; 159 void __iomem *reg_base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b3ba27fd9a6b..9d30a0fa3248 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -150,7 +150,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
150 private->crtc[pipe] = crtc; 150 private->crtc[pipe] = crtc;
151 151
152 ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL, 152 ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
153 &exynos_crtc_funcs); 153 &exynos_crtc_funcs, NULL);
154 if (ret < 0) 154 if (ret < 0)
155 goto err_crtc; 155 goto err_crtc;
156 156
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index c748b8790de3..1dbf8dca2d6b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -309,7 +309,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
309 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 309 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
310 310
311 drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, 311 drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
312 DRM_MODE_ENCODER_TMDS); 312 DRM_MODE_ENCODER_TMDS, NULL);
313 313
314 drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); 314 drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
315 315
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2c6019d6a205..9756797a15a5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,45 +304,6 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
304 return 0; 304 return 0;
305} 305}
306 306
307#ifdef CONFIG_PM_SLEEP
308static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
309{
310 struct drm_connector *connector;
311
312 drm_modeset_lock_all(dev);
313 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
314 int old_dpms = connector->dpms;
315
316 if (connector->funcs->dpms)
317 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
318
319 /* Set the old mode back to the connector for resume */
320 connector->dpms = old_dpms;
321 }
322 drm_modeset_unlock_all(dev);
323
324 return 0;
325}
326
327static int exynos_drm_resume(struct drm_device *dev)
328{
329 struct drm_connector *connector;
330
331 drm_modeset_lock_all(dev);
332 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
333 if (connector->funcs->dpms) {
334 int dpms = connector->dpms;
335
336 connector->dpms = DRM_MODE_DPMS_OFF;
337 connector->funcs->dpms(connector, dpms);
338 }
339 }
340 drm_modeset_unlock_all(dev);
341
342 return 0;
343}
344#endif
345
346static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 307static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
347{ 308{
348 struct drm_exynos_file_private *file_priv; 309 struct drm_exynos_file_private *file_priv;
@@ -476,31 +437,54 @@ static struct drm_driver exynos_drm_driver = {
476}; 437};
477 438
478#ifdef CONFIG_PM_SLEEP 439#ifdef CONFIG_PM_SLEEP
479static int exynos_drm_sys_suspend(struct device *dev) 440static int exynos_drm_suspend(struct device *dev)
480{ 441{
481 struct drm_device *drm_dev = dev_get_drvdata(dev); 442 struct drm_device *drm_dev = dev_get_drvdata(dev);
482 pm_message_t message; 443 struct drm_connector *connector;
483 444
484 if (pm_runtime_suspended(dev) || !drm_dev) 445 if (pm_runtime_suspended(dev) || !drm_dev)
485 return 0; 446 return 0;
486 447
487 message.event = PM_EVENT_SUSPEND; 448 drm_modeset_lock_all(drm_dev);
488 return exynos_drm_suspend(drm_dev, message); 449 drm_for_each_connector(connector, drm_dev) {
450 int old_dpms = connector->dpms;
451
452 if (connector->funcs->dpms)
453 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
454
455 /* Set the old mode back to the connector for resume */
456 connector->dpms = old_dpms;
457 }
458 drm_modeset_unlock_all(drm_dev);
459
460 return 0;
489} 461}
490 462
491static int exynos_drm_sys_resume(struct device *dev) 463static int exynos_drm_resume(struct device *dev)
492{ 464{
493 struct drm_device *drm_dev = dev_get_drvdata(dev); 465 struct drm_device *drm_dev = dev_get_drvdata(dev);
466 struct drm_connector *connector;
494 467
495 if (pm_runtime_suspended(dev) || !drm_dev) 468 if (pm_runtime_suspended(dev) || !drm_dev)
496 return 0; 469 return 0;
497 470
498 return exynos_drm_resume(drm_dev); 471 drm_modeset_lock_all(drm_dev);
472 drm_for_each_connector(connector, drm_dev) {
473 if (connector->funcs->dpms) {
474 int dpms = connector->dpms;
475
476 connector->dpms = DRM_MODE_DPMS_OFF;
477 connector->funcs->dpms(connector, dpms);
478 }
479 }
480 drm_modeset_unlock_all(drm_dev);
481
482 return 0;
499} 483}
500#endif 484#endif
501 485
502static const struct dev_pm_ops exynos_drm_pm_ops = { 486static const struct dev_pm_ops exynos_drm_pm_ops = {
503 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume) 487 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
504}; 488};
505 489
506/* forward declaration */ 490/* forward declaration */
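
SET_SYSTEM_SLEEP_PM_OPS() only populates the system-sleep slots of dev_pm_ops (.suspend, .resume, .freeze and friends), so it composes cleanly with the SET_RUNTIME_PM_OPS() entries the sub-drivers gain elsewhere in this series. A minimal sketch with hypothetical baz_* callbacks:

#include <linux/pm.h>

static int baz_suspend(struct device *dev)
{
	return 0;	/* quiesce hardware for system sleep */
}

static int baz_resume(struct device *dev)
{
	return 0;	/* restore hardware after system sleep */
}

static int baz_rt_suspend(struct device *dev)
{
	return 0;	/* gate clocks etc. when runtime-idle */
}

static int baz_rt_resume(struct device *dev)
{
	return 0;	/* ungate clocks on first use */
}

static const struct dev_pm_ops baz_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(baz_suspend, baz_resume)
	SET_RUNTIME_PM_OPS(baz_rt_suspend, baz_rt_resume, NULL)
};
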
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index f1eda7fa4e3c..82bbd7f4b316 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -38,24 +38,44 @@ enum exynos_drm_output_type {
38 EXYNOS_DISPLAY_TYPE_VIDI, 38 EXYNOS_DISPLAY_TYPE_VIDI,
39}; 39};
40 40
41struct exynos_drm_rect {
42 unsigned int x, y;
43 unsigned int w, h;
44};
45
41/* 46/*
42 * Exynos drm common overlay structure. 47 * Exynos drm plane state structure.
43 * 48 *
44 * @base: plane object 49 * @base: plane_state object (contains drm_framebuffer pointer)
45 * @src_x: offset x on a framebuffer to be displayed. 50 * @src: rectangle of the source image data to be displayed (clipped to
46 * - the unit is screen coordinates. 51 * visible part).
47 * @src_y: offset y on a framebuffer to be displayed. 52 * @crtc: rectangle of the target image position on hardware screen
48 * - the unit is screen coordinates. 53 * (clipped to visible part).
49 * @src_w: width of a partial image to be displayed from framebuffer.
50 * @src_h: height of a partial image to be displayed from framebuffer.
51 * @crtc_x: offset x on hardware screen.
52 * @crtc_y: offset y on hardware screen.
53 * @crtc_w: window width to be displayed (hardware screen).
54 * @crtc_h: window height to be displayed (hardware screen).
55 * @h_ratio: horizontal scaling ratio, 16.16 fixed point 54 * @h_ratio: horizontal scaling ratio, 16.16 fixed point
56 * @v_ratio: vertical scaling ratio, 16.16 fixed point 55 * @v_ratio: vertical scaling ratio, 16.16 fixed point
57 * @dma_addr: array of bus(accessed by dma) address to the memory region 56 *
58 * allocated for a overlay. 57 * this structure contains plane state data that will be applied to the
 58 * hardware-specific overlay info.
59 */
60
61struct exynos_drm_plane_state {
62 struct drm_plane_state base;
63 struct exynos_drm_rect crtc;
64 struct exynos_drm_rect src;
65 unsigned int h_ratio;
66 unsigned int v_ratio;
67};
68
69static inline struct exynos_drm_plane_state *
70to_exynos_plane_state(struct drm_plane_state *state)
71{
72 return container_of(state, struct exynos_drm_plane_state, base);
73}
74
75/*
76 * Exynos drm common overlay structure.
77 *
78 * @base: plane object
59 * @zpos: order of overlay layer(z position). 79 * @zpos: order of overlay layer(z position).
60 * 80 *
61 * this structure is common to exynos SoC and its contents would be copied 81 * this structure is common to exynos SoC and its contents would be copied
@@ -64,21 +84,32 @@ enum exynos_drm_output_type {
64 84
65struct exynos_drm_plane { 85struct exynos_drm_plane {
66 struct drm_plane base; 86 struct drm_plane base;
67 unsigned int src_x; 87 const struct exynos_drm_plane_config *config;
68 unsigned int src_y;
69 unsigned int src_w;
70 unsigned int src_h;
71 unsigned int crtc_x;
72 unsigned int crtc_y;
73 unsigned int crtc_w;
74 unsigned int crtc_h;
75 unsigned int h_ratio;
76 unsigned int v_ratio;
77 dma_addr_t dma_addr[MAX_FB_BUFFER];
78 unsigned int zpos; 88 unsigned int zpos;
79 struct drm_framebuffer *pending_fb; 89 struct drm_framebuffer *pending_fb;
80}; 90};
81 91
92#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
93#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
94
95/*
96 * Exynos DRM plane configuration structure.
97 *
98 * @zpos: z-position of the plane.
99 * @type: type of the plane (primary, cursor or overlay).
100 * @pixel_formats: supported pixel formats.
101 * @num_pixel_formats: number of elements in 'pixel_formats'.
102 * @capabilities: supported features (see EXYNOS_DRM_PLANE_CAP_*)
103 */
104
105struct exynos_drm_plane_config {
106 unsigned int zpos;
107 enum drm_plane_type type;
108 const uint32_t *pixel_formats;
109 unsigned int num_pixel_formats;
110 unsigned int capabilities;
111};
112
82/* 113/*
83 * Exynos drm crtc ops 114 * Exynos drm crtc ops
84 * 115 *
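
With drm_plane_state subclassed as above, any callback holding a drm_plane_state can recover the driver-private fields through container_of() via to_exynos_plane_state(). A hedged usage sketch (my_update_plane() is hypothetical; the src/crtc rectangles are filled in by exynos_plane_mode_set() during atomic_check, see exynos_drm_plane.c below):

static void my_update_plane(struct exynos_drm_plane *plane)
{
	struct exynos_drm_plane_state *state =
		to_exynos_plane_state(plane->base.state);

	/* clipped, scaled coordinates precomputed in atomic_check */
	pr_debug("src %ux%u+%u+%u -> crtc %ux%u+%u+%u\n",
		 state->src.w, state->src.h, state->src.x, state->src.y,
		 state->crtc.w, state->crtc.h, state->crtc.x, state->crtc.y);
}
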
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 12b03b364703..bc09bba3132a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1458,66 +1458,6 @@ static const struct mipi_dsi_host_ops exynos_dsi_ops = {
1458 .transfer = exynos_dsi_host_transfer, 1458 .transfer = exynos_dsi_host_transfer,
1459}; 1459};
1460 1460
1461static int exynos_dsi_poweron(struct exynos_dsi *dsi)
1462{
1463 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1464 int ret, i;
1465
1466 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1467 if (ret < 0) {
1468 dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
1469 return ret;
1470 }
1471
1472 for (i = 0; i < driver_data->num_clks; i++) {
1473 ret = clk_prepare_enable(dsi->clks[i]);
1474 if (ret < 0)
1475 goto err_clk;
1476 }
1477
1478 ret = phy_power_on(dsi->phy);
1479 if (ret < 0) {
1480 dev_err(dsi->dev, "cannot enable phy %d\n", ret);
1481 goto err_clk;
1482 }
1483
1484 return 0;
1485
1486err_clk:
1487 while (--i > -1)
1488 clk_disable_unprepare(dsi->clks[i]);
1489 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1490
1491 return ret;
1492}
1493
1494static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1495{
1496 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1497 int ret, i;
1498
1499 usleep_range(10000, 20000);
1500
1501 if (dsi->state & DSIM_STATE_INITIALIZED) {
1502 dsi->state &= ~DSIM_STATE_INITIALIZED;
1503
1504 exynos_dsi_disable_clock(dsi);
1505
1506 exynos_dsi_disable_irq(dsi);
1507 }
1508
1509 dsi->state &= ~DSIM_STATE_CMD_LPM;
1510
1511 phy_power_off(dsi->phy);
1512
1513 for (i = driver_data->num_clks - 1; i > -1; i--)
1514 clk_disable_unprepare(dsi->clks[i]);
1515
1516 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1517 if (ret < 0)
1518 dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
1519}
1520
1521static void exynos_dsi_enable(struct drm_encoder *encoder) 1461static void exynos_dsi_enable(struct drm_encoder *encoder)
1522{ 1462{
1523 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1463 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1526,16 +1466,14 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
1526 if (dsi->state & DSIM_STATE_ENABLED) 1466 if (dsi->state & DSIM_STATE_ENABLED)
1527 return; 1467 return;
1528 1468
1529 ret = exynos_dsi_poweron(dsi); 1469 pm_runtime_get_sync(dsi->dev);
1530 if (ret < 0)
1531 return;
1532 1470
1533 dsi->state |= DSIM_STATE_ENABLED; 1471 dsi->state |= DSIM_STATE_ENABLED;
1534 1472
1535 ret = drm_panel_prepare(dsi->panel); 1473 ret = drm_panel_prepare(dsi->panel);
1536 if (ret < 0) { 1474 if (ret < 0) {
1537 dsi->state &= ~DSIM_STATE_ENABLED; 1475 dsi->state &= ~DSIM_STATE_ENABLED;
1538 exynos_dsi_poweroff(dsi); 1476 pm_runtime_put_sync(dsi->dev);
1539 return; 1477 return;
1540 } 1478 }
1541 1479
@@ -1547,7 +1485,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
1547 dsi->state &= ~DSIM_STATE_ENABLED; 1485 dsi->state &= ~DSIM_STATE_ENABLED;
1548 exynos_dsi_set_display_enable(dsi, false); 1486 exynos_dsi_set_display_enable(dsi, false);
1549 drm_panel_unprepare(dsi->panel); 1487 drm_panel_unprepare(dsi->panel);
1550 exynos_dsi_poweroff(dsi); 1488 pm_runtime_put_sync(dsi->dev);
1551 return; 1489 return;
1552 } 1490 }
1553 1491
@@ -1569,7 +1507,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
1569 1507
1570 dsi->state &= ~DSIM_STATE_ENABLED; 1508 dsi->state &= ~DSIM_STATE_ENABLED;
1571 1509
1572 exynos_dsi_poweroff(dsi); 1510 pm_runtime_put_sync(dsi->dev);
1573} 1511}
1574 1512
1575static enum drm_connector_status 1513static enum drm_connector_status
@@ -1797,13 +1735,13 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1797 1735
1798 ep = of_graph_get_next_endpoint(node, NULL); 1736 ep = of_graph_get_next_endpoint(node, NULL);
1799 if (!ep) { 1737 if (!ep) {
1800 ret = -ENXIO; 1738 ret = -EINVAL;
1801 goto end; 1739 goto end;
1802 } 1740 }
1803 1741
1804 dsi->bridge_node = of_graph_get_remote_port_parent(ep); 1742 dsi->bridge_node = of_graph_get_remote_port_parent(ep);
1805 if (!dsi->bridge_node) { 1743 if (!dsi->bridge_node) {
1806 ret = -ENXIO; 1744 ret = -EINVAL;
1807 goto end; 1745 goto end;
1808 } 1746 }
1809end: 1747end:
@@ -1831,7 +1769,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1831 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1769 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1832 1770
1833 drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, 1771 drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
1834 DRM_MODE_ENCODER_TMDS); 1772 DRM_MODE_ENCODER_TMDS, NULL);
1835 1773
1836 drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); 1774 drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
1837 1775
@@ -1954,22 +1892,99 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1954 1892
1955 platform_set_drvdata(pdev, &dsi->encoder); 1893 platform_set_drvdata(pdev, &dsi->encoder);
1956 1894
1895 pm_runtime_enable(dev);
1896
1957 return component_add(dev, &exynos_dsi_component_ops); 1897 return component_add(dev, &exynos_dsi_component_ops);
1958} 1898}
1959 1899
1960static int exynos_dsi_remove(struct platform_device *pdev) 1900static int exynos_dsi_remove(struct platform_device *pdev)
1961{ 1901{
1902 pm_runtime_disable(&pdev->dev);
1903
1962 component_del(&pdev->dev, &exynos_dsi_component_ops); 1904 component_del(&pdev->dev, &exynos_dsi_component_ops);
1963 1905
1964 return 0; 1906 return 0;
1965} 1907}
1966 1908
1909#ifdef CONFIG_PM
1910static int exynos_dsi_suspend(struct device *dev)
1911{
1912 struct drm_encoder *encoder = dev_get_drvdata(dev);
1913 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1914 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1915 int ret, i;
1916
1917 usleep_range(10000, 20000);
1918
1919 if (dsi->state & DSIM_STATE_INITIALIZED) {
1920 dsi->state &= ~DSIM_STATE_INITIALIZED;
1921
1922 exynos_dsi_disable_clock(dsi);
1923
1924 exynos_dsi_disable_irq(dsi);
1925 }
1926
1927 dsi->state &= ~DSIM_STATE_CMD_LPM;
1928
1929 phy_power_off(dsi->phy);
1930
1931 for (i = driver_data->num_clks - 1; i > -1; i--)
1932 clk_disable_unprepare(dsi->clks[i]);
1933
1934 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1935 if (ret < 0)
1936 dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
1937
1938 return 0;
1939}
1940
1941static int exynos_dsi_resume(struct device *dev)
1942{
1943 struct drm_encoder *encoder = dev_get_drvdata(dev);
1944 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1945 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1946 int ret, i;
1947
1948 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1949 if (ret < 0) {
1950 dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
1951 return ret;
1952 }
1953
1954 for (i = 0; i < driver_data->num_clks; i++) {
1955 ret = clk_prepare_enable(dsi->clks[i]);
1956 if (ret < 0)
1957 goto err_clk;
1958 }
1959
1960 ret = phy_power_on(dsi->phy);
1961 if (ret < 0) {
1962 dev_err(dsi->dev, "cannot enable phy %d\n", ret);
1963 goto err_clk;
1964 }
1965
1966 return 0;
1967
1968err_clk:
1969 while (--i > -1)
1970 clk_disable_unprepare(dsi->clks[i]);
1971 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1972
1973 return ret;
1974}
1975#endif
1976
1977static const struct dev_pm_ops exynos_dsi_pm_ops = {
1978 SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
1979};
1980
1967struct platform_driver dsi_driver = { 1981struct platform_driver dsi_driver = {
1968 .probe = exynos_dsi_probe, 1982 .probe = exynos_dsi_probe,
1969 .remove = exynos_dsi_remove, 1983 .remove = exynos_dsi_remove,
1970 .driver = { 1984 .driver = {
1971 .name = "exynos-dsi", 1985 .name = "exynos-dsi",
1972 .owner = THIS_MODULE, 1986 .owner = THIS_MODULE,
1987 .pm = &exynos_dsi_pm_ops,
1973 .of_match_table = exynos_dsi_of_match, 1988 .of_match_table = exynos_dsi_of_match,
1974 }, 1989 },
1975}; 1990};
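
The resume path's error handling is the usual unwind idiom: on failure at clock i, disable clocks i-1 down to 0 in reverse order (the patch's `while (--i > -1)` is equivalent to `while (--i >= 0)`). In isolation, with a hypothetical helper:

#include <linux/clk.h>

/* enable clks[0..n-1]; on failure, disable what was enabled, in reverse */
static int enable_clocks(struct clk **clks, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	/* clks[i] failed; unwind clks[i-1] .. clks[0] */
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);

	return ret;
}
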
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index fcea28bdbc42..f6bdb0d6f142 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -37,6 +37,7 @@
37struct exynos_drm_fb { 37struct exynos_drm_fb {
38 struct drm_framebuffer fb; 38 struct drm_framebuffer fb;
39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
40 dma_addr_t dma_addr[MAX_FB_BUFFER];
40}; 41};
41 42
42static int check_fb_gem_memory_type(struct drm_device *drm_dev, 43static int check_fb_gem_memory_type(struct drm_device *drm_dev,
@@ -117,7 +118,7 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
117 118
118struct drm_framebuffer * 119struct drm_framebuffer *
119exynos_drm_framebuffer_init(struct drm_device *dev, 120exynos_drm_framebuffer_init(struct drm_device *dev,
120 struct drm_mode_fb_cmd2 *mode_cmd, 121 const struct drm_mode_fb_cmd2 *mode_cmd,
121 struct exynos_drm_gem **exynos_gem, 122 struct exynos_drm_gem **exynos_gem,
122 int count) 123 int count)
123{ 124{
@@ -135,6 +136,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
135 goto err; 136 goto err;
136 137
137 exynos_fb->exynos_gem[i] = exynos_gem[i]; 138 exynos_fb->exynos_gem[i] = exynos_gem[i];
139 exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
140 + mode_cmd->offsets[i];
138 } 141 }
139 142
140 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 143 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
@@ -154,7 +157,7 @@ err:
154 157
155static struct drm_framebuffer * 158static struct drm_framebuffer *
156exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 159exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
157 struct drm_mode_fb_cmd2 *mode_cmd) 160 const struct drm_mode_fb_cmd2 *mode_cmd)
158{ 161{
159 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 162 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
160 struct drm_gem_object *obj; 163 struct drm_gem_object *obj;
@@ -189,21 +192,14 @@ err:
189 return ERR_PTR(ret); 192 return ERR_PTR(ret);
190} 193}
191 194
192struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index) 195dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
193{ 196{
194 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 197 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
195 struct exynos_drm_gem *exynos_gem;
196 198
197 if (index >= MAX_FB_BUFFER) 199 if (index >= MAX_FB_BUFFER)
198 return NULL; 200 return DMA_ERROR_CODE;
199 201
200 exynos_gem = exynos_fb->exynos_gem[index]; 202 return exynos_fb->dma_addr[index];
201 if (!exynos_gem)
202 return NULL;
203
204 DRM_DEBUG_KMS("dma_addr: 0x%lx\n", (unsigned long)exynos_gem->dma_addr);
205
206 return exynos_gem;
207} 203}
208 204
209static void exynos_drm_output_poll_changed(struct drm_device *dev) 205static void exynos_drm_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 726a2d44371f..4aae9dd2b0d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -18,12 +18,11 @@
18 18
19struct drm_framebuffer * 19struct drm_framebuffer *
20exynos_drm_framebuffer_init(struct drm_device *dev, 20exynos_drm_framebuffer_init(struct drm_device *dev,
21 struct drm_mode_fb_cmd2 *mode_cmd, 21 const struct drm_mode_fb_cmd2 *mode_cmd,
22 struct exynos_drm_gem **exynos_gem, 22 struct exynos_drm_gem **exynos_gem,
23 int count); 23 int count);
24 24
25/* get gem object of a drm framebuffer */ 25dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index);
26struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index);
27 26
28void exynos_drm_mode_config_init(struct drm_device *dev); 27void exynos_drm_mode_config_init(struct drm_device *dev);
29 28
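
The per-buffer DMA address (GEM base plus the framebuffer's plane offset) is now computed once in exynos_drm_framebuffer_init() and looked up through this accessor, so commit paths no longer chase GEM objects. Typical caller shape (my_set_scanout() and BUF_START_REG are hypothetical):

static void my_set_scanout(void __iomem *regs, struct drm_framebuffer *fb)
{
	dma_addr_t addr = exynos_drm_fb_dma_addr(fb, 0);

	if (addr == DMA_ERROR_CODE)	/* buffer index out of range */
		return;

	writel(lower_32_bits(addr), regs + BUF_START_REG);
}
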
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bd75c1531cac..2e2247126581 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -29,6 +29,7 @@
29#include <drm/exynos_drm.h> 29#include <drm/exynos_drm.h>
30 30
31#include "exynos_drm_drv.h" 31#include "exynos_drm_drv.h"
32#include "exynos_drm_fb.h"
32#include "exynos_drm_fbdev.h" 33#include "exynos_drm_fbdev.h"
33#include "exynos_drm_crtc.h" 34#include "exynos_drm_crtc.h"
34#include "exynos_drm_plane.h" 35#include "exynos_drm_plane.h"
@@ -87,7 +88,6 @@
87 88
88/* FIMD has totally five hardware windows. */ 89/* FIMD has totally five hardware windows. */
89#define WINDOWS_NR 5 90#define WINDOWS_NR 5
90#define CURSOR_WIN 4
91 91
92struct fimd_driver_data { 92struct fimd_driver_data {
93 unsigned int timing_base; 93 unsigned int timing_base;
@@ -150,6 +150,7 @@ struct fimd_context {
150 struct drm_device *drm_dev; 150 struct drm_device *drm_dev;
151 struct exynos_drm_crtc *crtc; 151 struct exynos_drm_crtc *crtc;
152 struct exynos_drm_plane planes[WINDOWS_NR]; 152 struct exynos_drm_plane planes[WINDOWS_NR];
153 struct exynos_drm_plane_config configs[WINDOWS_NR];
153 struct clk *bus_clk; 154 struct clk *bus_clk;
154 struct clk *lcd_clk; 155 struct clk *lcd_clk;
155 void __iomem *regs; 156 void __iomem *regs;
@@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
187}; 188};
188MODULE_DEVICE_TABLE(of, fimd_driver_dt_match); 189MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
189 190
191static const enum drm_plane_type fimd_win_types[WINDOWS_NR] = {
192 DRM_PLANE_TYPE_PRIMARY,
193 DRM_PLANE_TYPE_OVERLAY,
194 DRM_PLANE_TYPE_OVERLAY,
195 DRM_PLANE_TYPE_OVERLAY,
196 DRM_PLANE_TYPE_CURSOR,
197};
198
190static const uint32_t fimd_formats[] = { 199static const uint32_t fimd_formats[] = {
191 DRM_FORMAT_C8, 200 DRM_FORMAT_C8,
192 DRM_FORMAT_XRGB1555, 201 DRM_FORMAT_XRGB1555,
@@ -478,7 +487,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
478 487
479 488
480static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, 489static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
481 struct drm_framebuffer *fb) 490 uint32_t pixel_format, int width)
482{ 491{
483 unsigned long val; 492 unsigned long val;
484 493
@@ -489,11 +498,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
489 * So the request format is ARGB8888 then change it to XRGB8888. 498 * So the request format is ARGB8888 then change it to XRGB8888.
490 */ 499 */
491 if (ctx->driver_data->has_limited_fmt && !win) { 500 if (ctx->driver_data->has_limited_fmt && !win) {
492 if (fb->pixel_format == DRM_FORMAT_ARGB8888) 501 if (pixel_format == DRM_FORMAT_ARGB8888)
493 fb->pixel_format = DRM_FORMAT_XRGB8888; 502 pixel_format = DRM_FORMAT_XRGB8888;
494 } 503 }
495 504
496 switch (fb->pixel_format) { 505 switch (pixel_format) {
497 case DRM_FORMAT_C8: 506 case DRM_FORMAT_C8:
498 val |= WINCON0_BPPMODE_8BPP_PALETTE; 507 val |= WINCON0_BPPMODE_8BPP_PALETTE;
499 val |= WINCONx_BURSTLEN_8WORD; 508 val |= WINCONx_BURSTLEN_8WORD;
@@ -529,17 +538,15 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
529 break; 538 break;
530 } 539 }
531 540
532 DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
533
534 /* 541 /*
535 * In case of exynos, setting dma-burst to 16Word causes permanent 542 * Setting dma-burst to 16Word causes permanent tearing for very small
536 * tearing for very small buffers, e.g. cursor buffer. Burst Mode 543 * buffers, e.g. cursor buffer. Burst Mode switching which based on
537 * switching which is based on plane size is not recommended as 544 * plane size is not recommended as plane size varies alot towards the
538 * plane size varies alot towards the end of the screen and rapid 545 * end of the screen and rapid movement causes unstable DMA, but it is
539 * movement causes unstable DMA which results into iommu crash/tear. 546 * still better to change dma-burst than displaying garbage.
540 */ 547 */
541 548
542 if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) { 549 if (width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
543 val &= ~WINCONx_BURSTLEN_MASK; 550 val &= ~WINCONx_BURSTLEN_MASK;
544 val |= WINCONx_BURSTLEN_4WORD; 551 val |= WINCONx_BURSTLEN_4WORD;
545 } 552 }
@@ -640,39 +647,41 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
640static void fimd_update_plane(struct exynos_drm_crtc *crtc, 647static void fimd_update_plane(struct exynos_drm_crtc *crtc,
641 struct exynos_drm_plane *plane) 648 struct exynos_drm_plane *plane)
642{ 649{
650 struct exynos_drm_plane_state *state =
651 to_exynos_plane_state(plane->base.state);
643 struct fimd_context *ctx = crtc->ctx; 652 struct fimd_context *ctx = crtc->ctx;
644 struct drm_plane_state *state = plane->base.state; 653 struct drm_framebuffer *fb = state->base.fb;
645 dma_addr_t dma_addr; 654 dma_addr_t dma_addr;
646 unsigned long val, size, offset; 655 unsigned long val, size, offset;
647 unsigned int last_x, last_y, buf_offsize, line_size; 656 unsigned int last_x, last_y, buf_offsize, line_size;
648 unsigned int win = plane->zpos; 657 unsigned int win = plane->zpos;
649 unsigned int bpp = state->fb->bits_per_pixel >> 3; 658 unsigned int bpp = fb->bits_per_pixel >> 3;
650 unsigned int pitch = state->fb->pitches[0]; 659 unsigned int pitch = fb->pitches[0];
651 660
652 if (ctx->suspended) 661 if (ctx->suspended)
653 return; 662 return;
654 663
655 offset = plane->src_x * bpp; 664 offset = state->src.x * bpp;
656 offset += plane->src_y * pitch; 665 offset += state->src.y * pitch;
657 666
658 /* buffer start address */ 667 /* buffer start address */
659 dma_addr = plane->dma_addr[0] + offset; 668 dma_addr = exynos_drm_fb_dma_addr(fb, 0) + offset;
660 val = (unsigned long)dma_addr; 669 val = (unsigned long)dma_addr;
661 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 670 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
662 671
663 /* buffer end address */ 672 /* buffer end address */
664 size = pitch * plane->crtc_h; 673 size = pitch * state->crtc.h;
665 val = (unsigned long)(dma_addr + size); 674 val = (unsigned long)(dma_addr + size);
666 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 675 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
667 676
668 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 677 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
669 (unsigned long)dma_addr, val, size); 678 (unsigned long)dma_addr, val, size);
670 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 679 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
671 plane->crtc_w, plane->crtc_h); 680 state->crtc.w, state->crtc.h);
672 681
673 /* buffer size */ 682 /* buffer size */
674 buf_offsize = pitch - (plane->crtc_w * bpp); 683 buf_offsize = pitch - (state->crtc.w * bpp);
675 line_size = plane->crtc_w * bpp; 684 line_size = state->crtc.w * bpp;
676 val = VIDW_BUF_SIZE_OFFSET(buf_offsize) | 685 val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
677 VIDW_BUF_SIZE_PAGEWIDTH(line_size) | 686 VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
678 VIDW_BUF_SIZE_OFFSET_E(buf_offsize) | 687 VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -680,16 +689,16 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
680 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); 689 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
681 690
682 /* OSD position */ 691 /* OSD position */
683 val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | 692 val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
684 VIDOSDxA_TOPLEFT_Y(plane->crtc_y) | 693 VIDOSDxA_TOPLEFT_Y(state->crtc.y) |
685 VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) | 694 VIDOSDxA_TOPLEFT_X_E(state->crtc.x) |
686 VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y); 695 VIDOSDxA_TOPLEFT_Y_E(state->crtc.y);
687 writel(val, ctx->regs + VIDOSD_A(win)); 696 writel(val, ctx->regs + VIDOSD_A(win));
688 697
689 last_x = plane->crtc_x + plane->crtc_w; 698 last_x = state->crtc.x + state->crtc.w;
690 if (last_x) 699 if (last_x)
691 last_x--; 700 last_x--;
692 last_y = plane->crtc_y + plane->crtc_h; 701 last_y = state->crtc.y + state->crtc.h;
693 if (last_y) 702 if (last_y)
694 last_y--; 703 last_y--;
695 704
@@ -699,20 +708,20 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
699 writel(val, ctx->regs + VIDOSD_B(win)); 708 writel(val, ctx->regs + VIDOSD_B(win));
700 709
701 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 710 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
702 plane->crtc_x, plane->crtc_y, last_x, last_y); 711 state->crtc.x, state->crtc.y, last_x, last_y);
703 712
704 /* OSD size */ 713 /* OSD size */
705 if (win != 3 && win != 4) { 714 if (win != 3 && win != 4) {
706 u32 offset = VIDOSD_D(win); 715 u32 offset = VIDOSD_D(win);
707 if (win == 0) 716 if (win == 0)
708 offset = VIDOSD_C(win); 717 offset = VIDOSD_C(win);
709 val = plane->crtc_w * plane->crtc_h; 718 val = state->crtc.w * state->crtc.h;
710 writel(val, ctx->regs + offset); 719 writel(val, ctx->regs + offset);
711 720
712 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); 721 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
713 } 722 }
714 723
715 fimd_win_set_pixfmt(ctx, win, state->fb); 724 fimd_win_set_pixfmt(ctx, win, fb->pixel_format, state->src.w);
716 725
717 /* hardware window 0 doesn't support color key. */ 726 /* hardware window 0 doesn't support color key. */
718 if (win != 0) 727 if (win != 0)
@@ -745,7 +754,6 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
745static void fimd_enable(struct exynos_drm_crtc *crtc) 754static void fimd_enable(struct exynos_drm_crtc *crtc)
746{ 755{
747 struct fimd_context *ctx = crtc->ctx; 756 struct fimd_context *ctx = crtc->ctx;
748 int ret;
749 757
750 if (!ctx->suspended) 758 if (!ctx->suspended)
751 return; 759 return;
@@ -754,18 +762,6 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
754 762
755 pm_runtime_get_sync(ctx->dev); 763 pm_runtime_get_sync(ctx->dev);
756 764
757 ret = clk_prepare_enable(ctx->bus_clk);
758 if (ret < 0) {
759 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
760 return;
761 }
762
763 ret = clk_prepare_enable(ctx->lcd_clk);
764 if (ret < 0) {
765 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
766 return;
767 }
768
769 /* if vblank was enabled status, enable it again. */ 765 /* if vblank was enabled status, enable it again. */
770 if (test_and_clear_bit(0, &ctx->irq_flags)) 766 if (test_and_clear_bit(0, &ctx->irq_flags))
771 fimd_enable_vblank(ctx->crtc); 767 fimd_enable_vblank(ctx->crtc);
@@ -795,11 +791,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
795 791
796 writel(0, ctx->regs + VIDCON0); 792 writel(0, ctx->regs + VIDCON0);
797 793
798 clk_disable_unprepare(ctx->lcd_clk);
799 clk_disable_unprepare(ctx->bus_clk);
800
801 pm_runtime_put_sync(ctx->dev); 794 pm_runtime_put_sync(ctx->dev);
802
803 ctx->suspended = true; 795 ctx->suspended = true;
804} 796}
805 797
@@ -941,18 +933,19 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
941 struct drm_device *drm_dev = data; 933 struct drm_device *drm_dev = data;
942 struct exynos_drm_private *priv = drm_dev->dev_private; 934 struct exynos_drm_private *priv = drm_dev->dev_private;
943 struct exynos_drm_plane *exynos_plane; 935 struct exynos_drm_plane *exynos_plane;
944 enum drm_plane_type type; 936 unsigned int i;
945 unsigned int zpos;
946 int ret; 937 int ret;
947 938
948 ctx->drm_dev = drm_dev; 939 ctx->drm_dev = drm_dev;
949 ctx->pipe = priv->pipe++; 940 ctx->pipe = priv->pipe++;
950 941
951 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 942 for (i = 0; i < WINDOWS_NR; i++) {
952 type = exynos_plane_get_type(zpos, CURSOR_WIN); 943 ctx->configs[i].pixel_formats = fimd_formats;
953 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 944 ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
954 1 << ctx->pipe, type, fimd_formats, 945 ctx->configs[i].zpos = i;
955 ARRAY_SIZE(fimd_formats), zpos); 946 ctx->configs[i].type = fimd_win_types[i];
947 ret = exynos_plane_init(drm_dev, &ctx->planes[i],
948 1 << ctx->pipe, &ctx->configs[i]);
956 if (ret) 949 if (ret)
957 return ret; 950 return ret;
958 } 951 }
@@ -1121,12 +1114,49 @@ static int fimd_remove(struct platform_device *pdev)
1121 return 0; 1114 return 0;
1122} 1115}
1123 1116
1117#ifdef CONFIG_PM
1118static int exynos_fimd_suspend(struct device *dev)
1119{
1120 struct fimd_context *ctx = dev_get_drvdata(dev);
1121
1122 clk_disable_unprepare(ctx->lcd_clk);
1123 clk_disable_unprepare(ctx->bus_clk);
1124
1125 return 0;
1126}
1127
1128static int exynos_fimd_resume(struct device *dev)
1129{
1130 struct fimd_context *ctx = dev_get_drvdata(dev);
1131 int ret;
1132
1133 ret = clk_prepare_enable(ctx->bus_clk);
1134 if (ret < 0) {
1135 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
1136 return ret;
1137 }
1138
1139 ret = clk_prepare_enable(ctx->lcd_clk);
1140 if (ret < 0) {
1141 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
1142 return ret;
1143 }
1144
1145 return 0;
1146}
1147#endif
1148
1149static const struct dev_pm_ops exynos_fimd_pm_ops = {
1150 SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
1151};
1152
1124struct platform_driver fimd_driver = { 1153struct platform_driver fimd_driver = {
1125 .probe = fimd_probe, 1154 .probe = fimd_probe,
1126 .remove = fimd_remove, 1155 .remove = fimd_remove,
1127 .driver = { 1156 .driver = {
1128 .name = "exynos4-fb", 1157 .name = "exynos4-fb",
1129 .owner = THIS_MODULE, 1158 .owner = THIS_MODULE,
1159 .pm = &exynos_fimd_pm_ops,
1130 .of_match_table = fimd_driver_dt_match, 1160 .of_match_table = fimd_driver_dt_match,
1131 }, 1161 },
1132}; 1162};
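
The bind loop above is table-driven: one exynos_drm_plane_config per hardware window, with zpos equal to the window index. Note that exynos_plane_init() stores the config pointer (see exynos_drm_plane.c below), so the configs must live at least as long as the planes — here they sit in fimd_context rather than on the stack. A condensed sketch with a hypothetical two-window context:

/* hypothetical two-window driver context */
struct my_ctx {
	struct exynos_drm_plane planes[2];
	struct exynos_drm_plane_config configs[2];	/* outlives planes */
};

static const enum drm_plane_type my_win_types[2] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_CURSOR,
};

static int my_bind_planes(struct drm_device *drm, struct my_ctx *ctx,
			  unsigned int pipe)
{
	unsigned int i;
	int ret;

	for (i = 0; i < 2; i++) {
		ctx->configs[i].pixel_formats = fimd_formats;
		ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
		ctx->configs[i].zpos = i;
		ctx->configs[i].type = my_win_types[i];

		ret = exynos_plane_init(drm, &ctx->planes[i], 1 << pipe,
					&ctx->configs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
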
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 37ab8b282db6..9ca5047959ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -55,8 +55,6 @@ struct exynos_drm_gem {
55 struct sg_table *sgt; 55 struct sg_table *sgt;
56}; 56};
57 57
58struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
59
60/* destroy a buffer with gem object */ 58/* destroy a buffer with gem object */
61void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem); 59void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
62 60
@@ -91,10 +89,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
91 unsigned int gem_handle, 89 unsigned int gem_handle,
92 struct drm_file *filp); 90 struct drm_file *filp);
93 91
94/* map user space allocated by malloc to pages. */
95int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
96 struct drm_file *file_priv);
97
98/* get buffer information to memory region allocated by gem. */ 92/* get buffer information to memory region allocated by gem. */
99int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 93int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
100 struct drm_file *file_priv); 94 struct drm_file *file_priv);
@@ -123,28 +117,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
123/* set vm_flags and we can change the vm attribute to other one at here. */ 117/* set vm_flags and we can change the vm attribute to other one at here. */
124int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 118int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
125 119
126static inline int vma_is_io(struct vm_area_struct *vma)
127{
128 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
129}
130
131/* get a copy of a virtual memory region. */
132struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
133
134/* release a userspace virtual memory area. */
135void exynos_gem_put_vma(struct vm_area_struct *vma);
136
137/* get pages from user space. */
138int exynos_gem_get_pages_from_userptr(unsigned long start,
139 unsigned int npages,
140 struct page **pages,
141 struct vm_area_struct *vma);
142
143/* drop the reference to pages. */
144void exynos_gem_put_pages_to_userptr(struct page **pages,
145 unsigned int npages,
146 struct vm_area_struct *vma);
147
148/* map sgt with dma region. */ 120/* map sgt with dma region. */
149int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, 121int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
150 struct sg_table *sgt, 122 struct sg_table *sgt,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 11b87d2a7913..7aecd23cfa11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -15,7 +15,8 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <plat/map-base.h> 18#include <linux/mfd/syscon.h>
19#include <linux/regmap.h>
19 20
20#include <drm/drmP.h> 21#include <drm/drmP.h>
21#include <drm/exynos_drm.h> 22#include <drm/exynos_drm.h>
@@ -126,6 +127,7 @@ struct gsc_capability {
126 * @ippdrv: prepare initialization using ippdrv. 127 * @ippdrv: prepare initialization using ippdrv.
127 * @regs_res: register resources. 128 * @regs_res: register resources.
128 * @regs: memory mapped io registers. 129 * @regs: memory mapped io registers.
130 * @sysreg: handle to SYSREG block regmap.
129 * @lock: locking of operations. 131 * @lock: locking of operations.
130 * @gsc_clk: gsc gate clock. 132 * @gsc_clk: gsc gate clock.
131 * @sc: scaler information. 133 * @sc: scaler information.
@@ -138,6 +140,7 @@ struct gsc_context {
138 struct exynos_drm_ippdrv ippdrv; 140 struct exynos_drm_ippdrv ippdrv;
139 struct resource *regs_res; 141 struct resource *regs_res;
140 void __iomem *regs; 142 void __iomem *regs;
143 struct regmap *sysreg;
141 struct mutex lock; 144 struct mutex lock;
142 struct clk *gsc_clk; 145 struct clk *gsc_clk;
143 struct gsc_scaler sc; 146 struct gsc_scaler sc;
@@ -437,9 +440,12 @@ static int gsc_sw_reset(struct gsc_context *ctx)
437 440
438static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) 441static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
439{ 442{
440 u32 gscblk_cfg; 443 unsigned int gscblk_cfg;
441 444
442 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1); 445 if (!ctx->sysreg)
446 return;
447
448 regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg);
443 449
444 if (enable) 450 if (enable)
445 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | 451 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
@@ -448,7 +454,7 @@ static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
448 else 454 else
449 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); 455 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
450 456
451 writel(gscblk_cfg, SYSREG_GSCBLK_CFG1); 457 regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg);
452} 458}
453 459
454static void gsc_handle_irq(struct gsc_context *ctx, bool enable, 460static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
@@ -1215,10 +1221,10 @@ static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1215 DRM_DEBUG_KMS("enable[%d]\n", enable); 1221 DRM_DEBUG_KMS("enable[%d]\n", enable);
1216 1222
1217 if (enable) { 1223 if (enable) {
1218 clk_enable(ctx->gsc_clk); 1224 clk_prepare_enable(ctx->gsc_clk);
1219 ctx->suspended = false; 1225 ctx->suspended = false;
1220 } else { 1226 } else {
1221 clk_disable(ctx->gsc_clk); 1227 clk_disable_unprepare(ctx->gsc_clk);
1222 ctx->suspended = true; 1228 ctx->suspended = true;
1223 } 1229 }
1224 1230
@@ -1663,6 +1669,15 @@ static int gsc_probe(struct platform_device *pdev)
1663 if (!ctx) 1669 if (!ctx)
1664 return -ENOMEM; 1670 return -ENOMEM;
1665 1671
1672 if (dev->of_node) {
1673 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
1674 "samsung,sysreg");
1675 if (IS_ERR(ctx->sysreg)) {
1676 dev_warn(dev, "failed to get system register.\n");
1677 ctx->sysreg = NULL;
1678 }
1679 }
1680
1666 /* clock control */ 1681 /* clock control */
1667 ctx->gsc_clk = devm_clk_get(dev, "gscl"); 1682 ctx->gsc_clk = devm_clk_get(dev, "gscl");
1668 if (IS_ERR(ctx->gsc_clk)) { 1683 if (IS_ERR(ctx->gsc_clk)) {
@@ -1713,7 +1728,6 @@ static int gsc_probe(struct platform_device *pdev)
1713 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1714 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
1715 1730
1716 pm_runtime_set_active(dev);
1717 pm_runtime_enable(dev); 1731 pm_runtime_enable(dev);
1718 1732
1719 ret = exynos_drm_ippdrv_register(ippdrv); 1733 ret = exynos_drm_ippdrv_register(ippdrv);
@@ -1797,6 +1811,12 @@ static const struct dev_pm_ops gsc_pm_ops = {
1797 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) 1811 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1798}; 1812};
1799 1813
1814static const struct of_device_id exynos_drm_gsc_of_match[] = {
1815 { .compatible = "samsung,exynos5-gsc" },
1816 { },
1817};
1818MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
1819
1800struct platform_driver gsc_driver = { 1820struct platform_driver gsc_driver = {
1801 .probe = gsc_probe, 1821 .probe = gsc_probe,
1802 .remove = gsc_remove, 1822 .remove = gsc_remove,
@@ -1804,6 +1824,7 @@ struct platform_driver gsc_driver = {
1804 .name = "exynos-drm-gsc", 1824 .name = "exynos-drm-gsc",
1805 .owner = THIS_MODULE, 1825 .owner = THIS_MODULE,
1806 .pm = &gsc_pm_ops, 1826 .pm = &gsc_pm_ops,
1827 .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
1807 }, 1828 },
1808}; 1829};
1809 1830
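
Replacing the statically mapped SYSREG access with a syscon regmap keeps the driver buildable on multiplatform kernels and makes the register block optional. The lookup-plus-read-modify-write shape, in isolation (GSC_WB_DEST_BIT is a hypothetical bit; SYSREG_GSCBLK_CFG1 is the offset used above):

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static void my_set_wb_path(struct device *dev, bool enable)
{
	struct regmap *sysreg;
	unsigned int cfg;

	sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
						 "samsung,sysreg");
	if (IS_ERR(sysreg))
		return;		/* property absent: skip, as the driver does */

	regmap_read(sysreg, SYSREG_GSCBLK_CFG1, &cfg);
	if (enable)
		cfg |= GSC_WB_DEST_BIT;		/* hypothetical bit */
	else
		cfg &= ~GSC_WB_DEST_BIT;
	regmap_write(sysreg, SYSREG_GSCBLK_CFG1, cfg);
}

regmap_update_bits(sysreg, SYSREG_GSCBLK_CFG1, GSC_WB_DEST_BIT, enable ? GSC_WB_DEST_BIT : 0) would fold the read-modify-write into a single locked call.
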
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 179311760bb7..e668fcdbcafc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -56,93 +56,170 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
56 return size; 56 return size;
57} 57}
58 58
59static void exynos_plane_mode_set(struct drm_plane *plane, 59static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
60 struct drm_crtc *crtc, 60
61 struct drm_framebuffer *fb,
62 int crtc_x, int crtc_y,
63 unsigned int crtc_w, unsigned int crtc_h,
64 uint32_t src_x, uint32_t src_y,
65 uint32_t src_w, uint32_t src_h)
66{ 61{
67 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 62 struct drm_plane_state *state = &exynos_state->base;
63 struct drm_crtc *crtc = exynos_state->base.crtc;
68 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 64 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
65 int crtc_x, crtc_y;
66 unsigned int crtc_w, crtc_h;
67 unsigned int src_x, src_y;
68 unsigned int src_w, src_h;
69 unsigned int actual_w; 69 unsigned int actual_w;
70 unsigned int actual_h; 70 unsigned int actual_h;
71 71
72 /*
73 * The original src/dest coordinates are stored in exynos_state->base,
74 * but we want to keep another copy internal to our driver that we can
75 * clip/modify ourselves.
76 */
77
78 crtc_x = state->crtc_x;
79 crtc_y = state->crtc_y;
80 crtc_w = state->crtc_w;
81 crtc_h = state->crtc_h;
82
83 src_x = state->src_x >> 16;
84 src_y = state->src_y >> 16;
85 src_w = state->src_w >> 16;
86 src_h = state->src_h >> 16;
87
88 /* set ratio */
89 exynos_state->h_ratio = (src_w << 16) / crtc_w;
90 exynos_state->v_ratio = (src_h << 16) / crtc_h;
91
92 /* clip to visible area */
72 actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay); 93 actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay);
73 actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay); 94 actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay);
74 95
75 if (crtc_x < 0) { 96 if (crtc_x < 0) {
76 if (actual_w) 97 if (actual_w)
77 src_x -= crtc_x; 98 src_x += ((-crtc_x) * exynos_state->h_ratio) >> 16;
78 crtc_x = 0; 99 crtc_x = 0;
79 } 100 }
80 101
81 if (crtc_y < 0) { 102 if (crtc_y < 0) {
82 if (actual_h) 103 if (actual_h)
83 src_y -= crtc_y; 104 src_y += ((-crtc_y) * exynos_state->v_ratio) >> 16;
84 crtc_y = 0; 105 crtc_y = 0;
85 } 106 }
86 107
87 /* set ratio */
88 exynos_plane->h_ratio = (src_w << 16) / crtc_w;
89 exynos_plane->v_ratio = (src_h << 16) / crtc_h;
90
91 /* set drm framebuffer data. */ 108 /* set drm framebuffer data. */
92 exynos_plane->src_x = src_x; 109 exynos_state->src.x = src_x;
93 exynos_plane->src_y = src_y; 110 exynos_state->src.y = src_y;
94 exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16; 111 exynos_state->src.w = (actual_w * exynos_state->h_ratio) >> 16;
95 exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16; 112 exynos_state->src.h = (actual_h * exynos_state->v_ratio) >> 16;
96 113
97 /* set plane range to be displayed. */ 114 /* set plane range to be displayed. */
98 exynos_plane->crtc_x = crtc_x; 115 exynos_state->crtc.x = crtc_x;
99 exynos_plane->crtc_y = crtc_y; 116 exynos_state->crtc.y = crtc_y;
100 exynos_plane->crtc_w = actual_w; 117 exynos_state->crtc.w = actual_w;
101 exynos_plane->crtc_h = actual_h; 118 exynos_state->crtc.h = actual_h;
102 119
103 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)", 120 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
104 exynos_plane->crtc_x, exynos_plane->crtc_y, 121 exynos_state->crtc.x, exynos_state->crtc.y,
105 exynos_plane->crtc_w, exynos_plane->crtc_h); 122 exynos_state->crtc.w, exynos_state->crtc.h);
123}
124
125static void exynos_drm_plane_reset(struct drm_plane *plane)
126{
127 struct exynos_drm_plane_state *exynos_state;
128
129 if (plane->state) {
130 exynos_state = to_exynos_plane_state(plane->state);
131 if (exynos_state->base.fb)
132 drm_framebuffer_unreference(exynos_state->base.fb);
133 kfree(exynos_state);
134 plane->state = NULL;
135 }
136
137 exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
138 if (exynos_state) {
139 plane->state = &exynos_state->base;
140 plane->state->plane = plane;
141 }
142}
143
144static struct drm_plane_state *
145exynos_drm_plane_duplicate_state(struct drm_plane *plane)
146{
147 struct exynos_drm_plane_state *exynos_state;
148 struct exynos_drm_plane_state *copy;
149
150 exynos_state = to_exynos_plane_state(plane->state);
151 copy = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
152 if (!copy)
153 return NULL;
154
155 __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
156 return &copy->base;
157}
106 158
107 plane->crtc = crtc; 159static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
160 struct drm_plane_state *old_state)
161{
162 struct exynos_drm_plane_state *old_exynos_state =
163 to_exynos_plane_state(old_state);
164 __drm_atomic_helper_plane_destroy_state(plane, old_state);
165 kfree(old_exynos_state);
108} 166}
109 167
110static struct drm_plane_funcs exynos_plane_funcs = { 168static struct drm_plane_funcs exynos_plane_funcs = {
111 .update_plane = drm_atomic_helper_update_plane, 169 .update_plane = drm_atomic_helper_update_plane,
112 .disable_plane = drm_atomic_helper_disable_plane, 170 .disable_plane = drm_atomic_helper_disable_plane,
113 .destroy = drm_plane_cleanup, 171 .destroy = drm_plane_cleanup,
114 .reset = drm_atomic_helper_plane_reset, 172 .reset = exynos_drm_plane_reset,
115 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 173 .atomic_duplicate_state = exynos_drm_plane_duplicate_state,
116 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 174 .atomic_destroy_state = exynos_drm_plane_destroy_state,
117}; 175};
118 176
177static int
178exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
179 struct exynos_drm_plane_state *state)
180{
181 bool width_ok = false, height_ok = false;
182
183 if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
184 return 0;
185
186 if (state->src.w == state->crtc.w)
187 width_ok = true;
188
189 if (state->src.h == state->crtc.h)
190 height_ok = true;
191
192 if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
193 state->h_ratio == (1 << 15))
194 width_ok = true;
195
196 if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
197 state->v_ratio == (1 << 15))
198 height_ok = true;
199
200 if (width_ok && height_ok)
201 return 0;
202
203 DRM_DEBUG_KMS("scaling mode is not supported\n");
204 return -ENOTSUPP;
205}
206
119static int exynos_plane_atomic_check(struct drm_plane *plane, 207static int exynos_plane_atomic_check(struct drm_plane *plane,
120 struct drm_plane_state *state) 208 struct drm_plane_state *state)
121{ 209{
122 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 210 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
123 int nr; 211 struct exynos_drm_plane_state *exynos_state =
124 int i; 212 to_exynos_plane_state(state);
213 int ret = 0;
125 214
126 if (!state->fb) 215 if (!state->crtc || !state->fb)
127 return 0; 216 return 0;
128 217
129 nr = drm_format_num_planes(state->fb->pixel_format); 218 /* translate state into exynos_state */
130 for (i = 0; i < nr; i++) { 219 exynos_plane_mode_set(exynos_state);
131 struct exynos_drm_gem *exynos_gem =
132 exynos_drm_fb_gem(state->fb, i);
133 if (!exynos_gem) {
134 DRM_DEBUG_KMS("gem object is null\n");
135 return -EFAULT;
136 }
137
138 exynos_plane->dma_addr[i] = exynos_gem->dma_addr +
139 state->fb->offsets[i];
140
141 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
142 i, (unsigned long)exynos_plane->dma_addr[i]);
143 }
144 220
145 return 0; 221 ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
222 return ret;
146} 223}
147 224
148static void exynos_plane_atomic_update(struct drm_plane *plane, 225static void exynos_plane_atomic_update(struct drm_plane *plane,
@@ -155,12 +232,7 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
155 if (!state->crtc) 232 if (!state->crtc)
156 return; 233 return;
157 234
158 exynos_plane_mode_set(plane, state->crtc, state->fb, 235 plane->crtc = state->crtc;
159 state->crtc_x, state->crtc_y,
160 state->crtc_w, state->crtc_h,
161 state->src_x >> 16, state->src_y >> 16,
162 state->src_w >> 16, state->src_h >> 16);
163
164 exynos_plane->pending_fb = state->fb; 236 exynos_plane->pending_fb = state->fb;
165 237
166 if (exynos_crtc->ops->update_plane) 238 if (exynos_crtc->ops->update_plane)
@@ -177,8 +249,7 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
177 return; 249 return;
178 250
179 if (exynos_crtc->ops->disable_plane) 251 if (exynos_crtc->ops->disable_plane)
180 exynos_crtc->ops->disable_plane(exynos_crtc, 252 exynos_crtc->ops->disable_plane(exynos_crtc, exynos_plane);
181 exynos_plane);
182} 253}
183 254
184static const struct drm_plane_helper_funcs plane_helper_funcs = { 255static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -207,28 +278,19 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
207 drm_object_attach_property(&plane->base, prop, zpos); 278 drm_object_attach_property(&plane->base, prop, zpos);
208} 279}
209 280
210enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
211 unsigned int cursor_win)
212{
213 if (zpos == DEFAULT_WIN)
214 return DRM_PLANE_TYPE_PRIMARY;
215 else if (zpos == cursor_win)
216 return DRM_PLANE_TYPE_CURSOR;
217 else
218 return DRM_PLANE_TYPE_OVERLAY;
219}
220
221int exynos_plane_init(struct drm_device *dev, 281int exynos_plane_init(struct drm_device *dev,
222 struct exynos_drm_plane *exynos_plane, 282 struct exynos_drm_plane *exynos_plane,
223 unsigned long possible_crtcs, enum drm_plane_type type, 283 unsigned long possible_crtcs,
224 const uint32_t *formats, unsigned int fcount, 284 const struct exynos_drm_plane_config *config)
225 unsigned int zpos)
226{ 285{
227 int err; 286 int err;
228 287
229 err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs, 288 err = drm_universal_plane_init(dev, &exynos_plane->base,
230 &exynos_plane_funcs, formats, fcount, 289 possible_crtcs,
231 type); 290 &exynos_plane_funcs,
291 config->pixel_formats,
292 config->num_pixel_formats,
293 config->type, NULL);
232 if (err) { 294 if (err) {
233 DRM_ERROR("failed to initialize plane\n"); 295 DRM_ERROR("failed to initialize plane\n");
234 return err; 296 return err;
@@ -236,10 +298,12 @@ int exynos_plane_init(struct drm_device *dev,
236 298
237 drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs); 299 drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs);
238 300
239 exynos_plane->zpos = zpos; 301 exynos_plane->zpos = config->zpos;
302 exynos_plane->config = config;
240 303
241 if (type == DRM_PLANE_TYPE_OVERLAY) 304 if (config->type == DRM_PLANE_TYPE_OVERLAY)
242 exynos_plane_attach_zpos_property(&exynos_plane->base, zpos); 305 exynos_plane_attach_zpos_property(&exynos_plane->base,
306 config->zpos);
243 307
244 return 0; 308 return 0;
245} 309}
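
Subclassing drm_plane_state always comes as a trio — .reset, .atomic_duplicate_state and .atomic_destroy_state — each pairing the allocation or free of the larger structure with the __drm_atomic_helper_plane_*() core helpers. The skeleton, with a hypothetical my_plane_state:

/* hypothetical subclass, mirroring exynos_drm_plane_state above */
struct my_plane_state {
	struct drm_plane_state base;
	int extra;	/* recomputed in atomic_check, like src/crtc */
};

static struct drm_plane_state *my_duplicate_state(struct drm_plane *plane)
{
	struct my_plane_state *copy = kzalloc(sizeof(*copy), GFP_KERNEL);

	if (!copy)
		return NULL;

	/* copies the base fields and takes an fb reference */
	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
	return &copy->base;
}

static void my_destroy_state(struct drm_plane *plane,
			     struct drm_plane_state *old)
{
	/* drops the fb reference taken on duplicate */
	__drm_atomic_helper_plane_destroy_state(plane, old);
	kfree(container_of(old, struct my_plane_state, base));
}
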
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index abb641e64c23..0dd096548284 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,10 +9,7 @@
9 * 9 *
10 */ 10 */
11 11
12enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
13 unsigned int cursor_win);
14int exynos_plane_init(struct drm_device *dev, 12int exynos_plane_init(struct drm_device *dev,
15 struct exynos_drm_plane *exynos_plane, 13 struct exynos_drm_plane *exynos_plane,
16 unsigned long possible_crtcs, enum drm_plane_type type, 14 unsigned long possible_crtcs,
17 const uint32_t *formats, unsigned int fcount, 15 const struct exynos_drm_plane_config *config);
18 unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 2f5c118f4c8e..bea0f7826d30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -790,10 +790,10 @@ static int rotator_remove(struct platform_device *pdev)
790static int rotator_clk_crtl(struct rot_context *rot, bool enable) 790static int rotator_clk_crtl(struct rot_context *rot, bool enable)
791{ 791{
792 if (enable) { 792 if (enable) {
793 clk_enable(rot->clock); 793 clk_prepare_enable(rot->clock);
794 rot->suspended = false; 794 rot->suspended = false;
795 } else { 795 } else {
796 clk_disable(rot->clock); 796 clk_disable_unprepare(rot->clock);
797 rot->suspended = true; 797 rot->suspended = true;
798 } 798 }
799 799
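
Same fix as in the gsc driver: clk_enable()/clk_disable() are only valid on a clock that is already prepared, and clk_prepare()/clk_unprepare() may sleep, so call sites in process context should use the combined helpers and keep the pairing symmetric. In sketch form (the helpers are hypothetical; the clock pointer is assumed to come from devm_clk_get()):

static int rot_power_on(struct clk *clock)
{
	/* clk_prepare() may sleep; clk_enable() is the fast, atomic part */
	return clk_prepare_enable(clock);
}

static void rot_power_off(struct clk *clock)
{
	/* reverse order: disable first, then unprepare */
	clk_disable_unprepare(clock);
}
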
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 669362c53f49..319aa31954d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -24,12 +24,12 @@
24 24
25#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_crtc.h" 26#include "exynos_drm_crtc.h"
27#include "exynos_drm_fb.h"
27#include "exynos_drm_plane.h" 28#include "exynos_drm_plane.h"
28#include "exynos_drm_vidi.h" 29#include "exynos_drm_vidi.h"
29 30
30/* vidi has totally three virtual windows. */ 31/* vidi has totally three virtual windows. */
31#define WINDOWS_NR 3 32#define WINDOWS_NR 3
32#define CURSOR_WIN 2
33 33
34#define ctx_from_connector(c) container_of(c, struct vidi_context, \ 34#define ctx_from_connector(c) container_of(c, struct vidi_context, \
35 connector) 35 connector)
@@ -89,6 +89,12 @@ static const uint32_t formats[] = {
89 DRM_FORMAT_NV12, 89 DRM_FORMAT_NV12,
90}; 90};
91 91
92static const enum drm_plane_type vidi_win_types[WINDOWS_NR] = {
93 DRM_PLANE_TYPE_PRIMARY,
94 DRM_PLANE_TYPE_OVERLAY,
95 DRM_PLANE_TYPE_CURSOR,
96};
97
92static int vidi_enable_vblank(struct exynos_drm_crtc *crtc) 98static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
93{ 99{
94 struct vidi_context *ctx = crtc->ctx; 100 struct vidi_context *ctx = crtc->ctx;
@@ -125,12 +131,15 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
125static void vidi_update_plane(struct exynos_drm_crtc *crtc, 131static void vidi_update_plane(struct exynos_drm_crtc *crtc,
126 struct exynos_drm_plane *plane) 132 struct exynos_drm_plane *plane)
127{ 133{
134 struct drm_plane_state *state = plane->base.state;
128 struct vidi_context *ctx = crtc->ctx; 135 struct vidi_context *ctx = crtc->ctx;
136 dma_addr_t addr;
129 137
130 if (ctx->suspended) 138 if (ctx->suspended)
131 return; 139 return;
132 140
133 DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr); 141 addr = exynos_drm_fb_dma_addr(state->fb, 0);
142 DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
134 143
135 if (ctx->vblank_on) 144 if (ctx->vblank_on)
136 schedule_work(&ctx->work); 145 schedule_work(&ctx->work);
@@ -439,17 +448,21 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
439 struct drm_device *drm_dev = data; 448 struct drm_device *drm_dev = data;
440 struct drm_encoder *encoder = &ctx->encoder; 449 struct drm_encoder *encoder = &ctx->encoder;
441 struct exynos_drm_plane *exynos_plane; 450 struct exynos_drm_plane *exynos_plane;
442 enum drm_plane_type type; 451 struct exynos_drm_plane_config plane_config = { 0 };
443 unsigned int zpos; 452 unsigned int i;
444 int pipe, ret; 453 int pipe, ret;
445 454
446 vidi_ctx_initialize(ctx, drm_dev); 455 vidi_ctx_initialize(ctx, drm_dev);
447 456
448 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 457 plane_config.pixel_formats = formats;
449 type = exynos_plane_get_type(zpos, CURSOR_WIN); 458 plane_config.num_pixel_formats = ARRAY_SIZE(formats);
450 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 459
451 1 << ctx->pipe, type, formats, 460 for (i = 0; i < WINDOWS_NR; i++) {
452 ARRAY_SIZE(formats), zpos); 461 plane_config.zpos = i;
462 plane_config.type = vidi_win_types[i];
463
464 ret = exynos_plane_init(drm_dev, &ctx->planes[i],
465 1 << ctx->pipe, &plane_config);
453 if (ret) 466 if (ret)
454 return ret; 467 return ret;
455 } 468 }
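The vidi_bind() rework above is part of a series-wide change: exynos_plane_init() now takes a single struct exynos_drm_plane_config instead of separate type/format-list/zpos arguments, so new per-plane attributes (such as the capabilities flags the mixer uses further down) can be added without touching every caller. The definition is not shown in this section; the shape below is inferred from the fields the diff assigns and is only a sketch (the real declaration lives in the exynos driver headers):

    /* Presumed layout, reconstructed from usage in this diff. */
    struct exynos_drm_plane_config {
            unsigned int zpos;                  /* stacking position */
            enum drm_plane_type type;           /* primary/overlay/cursor */
            const uint32_t *pixel_formats;      /* supported fourccs */
            unsigned int num_pixel_formats;
            unsigned int capabilities;          /* EXYNOS_DRM_PLANE_CAP_* */
    };

A config struct like this also lets callers reuse one on-stack instance across a loop, as vidi_bind() does, mutating only the fields that differ per window.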
@@ -473,7 +486,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
473 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 486 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
474 487
475 drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, 488 drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
476 DRM_MODE_ENCODER_TMDS); 489 DRM_MODE_ENCODER_TMDS, NULL);
477 490
478 drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); 491 drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
479 492
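The trailing NULL added to every drm_encoder_init() call in this series (and to drm_crtc_init_with_planes() and the plane init calls below) reflects a DRM core change in this cycle: mode objects gained an optional printf-style name used in debug output, and NULL keeps the core's auto-generated name. A sketch, with my_encoder_funcs and idx as illustrative placeholders:

    /* Old: drm_encoder_init(dev, enc, &funcs, DRM_MODE_ENCODER_TMDS);
     * New: same call with a name argument appended. */
    ret = drm_encoder_init(drm_dev, encoder, &my_encoder_funcs,
                           DRM_MODE_ENCODER_TMDS, NULL);
    if (ret)
            return ret;

    /* Or label the encoder explicitly for logs and debugfs: */
    ret = drm_encoder_init(drm_dev, encoder, &my_encoder_funcs,
                           DRM_MODE_ENCODER_TMDS, "vidi-tmds-%d", idx);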
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 57b675563e94..7d5ca6ca4efe 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -113,7 +113,7 @@ struct hdmi_context {
113 void __iomem *regs_hdmiphy; 113 void __iomem *regs_hdmiphy;
114 struct i2c_client *hdmiphy_port; 114 struct i2c_client *hdmiphy_port;
115 struct i2c_adapter *ddc_adpt; 115 struct i2c_adapter *ddc_adpt;
116 int hpd_gpio; 116 struct gpio_desc *hpd_gpio;
117 int irq; 117 int irq;
118 struct regmap *pmureg; 118 struct regmap *pmureg;
119 struct clk *hdmi; 119 struct clk *hdmi;
@@ -1588,8 +1588,6 @@ static void hdmi_enable(struct drm_encoder *encoder)
1588 if (hdata->powered) 1588 if (hdata->powered)
1589 return; 1589 return;
1590 1590
1591 hdata->powered = true;
1592
1593 pm_runtime_get_sync(hdata->dev); 1591 pm_runtime_get_sync(hdata->dev);
1594 1592
1595 if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk)) 1593 if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
@@ -1599,10 +1597,9 @@ static void hdmi_enable(struct drm_encoder *encoder)
1599 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, 1597 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
1600 PMU_HDMI_PHY_ENABLE_BIT, 1); 1598 PMU_HDMI_PHY_ENABLE_BIT, 1);
1601 1599
1602 clk_prepare_enable(hdata->hdmi);
1603 clk_prepare_enable(hdata->sclk_hdmi);
1604
1605 hdmi_conf_apply(hdata); 1600 hdmi_conf_apply(hdata);
1601
1602 hdata->powered = true;
1606} 1603}
1607 1604
1608static void hdmi_disable(struct drm_encoder *encoder) 1605static void hdmi_disable(struct drm_encoder *encoder)
@@ -1633,9 +1630,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
1633 1630
1634 cancel_delayed_work(&hdata->hotplug_work); 1631 cancel_delayed_work(&hdata->hotplug_work);
1635 1632
1636 clk_disable_unprepare(hdata->sclk_hdmi);
1637 clk_disable_unprepare(hdata->hdmi);
1638
1639 /* reset pmu hdmiphy control bit to disable hdmiphy */ 1633 /* reset pmu hdmiphy control bit to disable hdmiphy */
1640 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, 1634 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
1641 PMU_HDMI_PHY_ENABLE_BIT, 0); 1635 PMU_HDMI_PHY_ENABLE_BIT, 0);
@@ -1793,7 +1787,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
1793 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1787 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1794 1788
1795 drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, 1789 drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
1796 DRM_MODE_ENCODER_TMDS); 1790 DRM_MODE_ENCODER_TMDS, NULL);
1797 1791
1798 drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs); 1792 drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
1799 1793
@@ -1978,12 +1972,49 @@ static int hdmi_remove(struct platform_device *pdev)
1978 return 0; 1972 return 0;
1979} 1973}
1980 1974
1975#ifdef CONFIG_PM
1976static int exynos_hdmi_suspend(struct device *dev)
1977{
1978 struct hdmi_context *hdata = dev_get_drvdata(dev);
1979
1980 clk_disable_unprepare(hdata->sclk_hdmi);
1981 clk_disable_unprepare(hdata->hdmi);
1982
1983 return 0;
1984}
1985
1986static int exynos_hdmi_resume(struct device *dev)
1987{
1988 struct hdmi_context *hdata = dev_get_drvdata(dev);
1989 int ret;
1990
1991 ret = clk_prepare_enable(hdata->hdmi);
1992 if (ret < 0) {
1993 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
1994 return ret;
1995 }
1996 ret = clk_prepare_enable(hdata->sclk_hdmi);
1997 if (ret < 0) {
1998 DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n",
1999 ret);
2000 return ret;
2001 }
2002
2003 return 0;
2004}
2005#endif
2006
2007static const struct dev_pm_ops exynos_hdmi_pm_ops = {
2008 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
2009};
2010
1981struct platform_driver hdmi_driver = { 2011struct platform_driver hdmi_driver = {
1982 .probe = hdmi_probe, 2012 .probe = hdmi_probe,
1983 .remove = hdmi_remove, 2013 .remove = hdmi_remove,
1984 .driver = { 2014 .driver = {
1985 .name = "exynos-hdmi", 2015 .name = "exynos-hdmi",
1986 .owner = THIS_MODULE, 2016 .owner = THIS_MODULE,
2017 .pm = &exynos_hdmi_pm_ops,
1987 .of_match_table = hdmi_match_types, 2018 .of_match_table = hdmi_match_types,
1988 }, 2019 },
1989}; 2020};
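The HDMI changes above all serve one pattern: clock management moves out of the enable/disable paths and into runtime-PM callbacks registered with SET_RUNTIME_PM_OPS(), so the existing pm_runtime_get_sync()/pm_runtime_put() calls pull the clocks up and down as a side effect (the mixer below gets the same treatment). A minimal sketch of the shape, with hypothetical names:

    #include <linux/clk.h>
    #include <linux/pm_runtime.h>

    /* The runtime-PM callbacks own the clocks... */
    static int my_hw_runtime_suspend(struct device *dev)
    {
            struct my_ctx *ctx = dev_get_drvdata(dev);

            clk_disable_unprepare(ctx->clk);
            return 0;
    }

    static int my_hw_runtime_resume(struct device *dev)
    {
            struct my_ctx *ctx = dev_get_drvdata(dev);

            return clk_prepare_enable(ctx->clk);
    }

    static const struct dev_pm_ops my_hw_pm_ops = {
            SET_RUNTIME_PM_OPS(my_hw_runtime_suspend,
                               my_hw_runtime_resume, NULL)
    };

    /* ...and the display path only takes a runtime-PM reference. */
    static void my_hw_enable(struct my_ctx *ctx)
    {
            pm_runtime_get_sync(ctx->dev);  /* resumes -> clocks on */
            /* program the hardware here */
    }

    static void my_hw_disable(struct my_ctx *ctx)
    {
            /* quiesce the hardware here */
            pm_runtime_put(ctx->dev);       /* may suspend -> clocks off */
    }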
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index d09f8f9a8939..dfb35e2da4db 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,12 +37,12 @@
37 37
38#include "exynos_drm_drv.h" 38#include "exynos_drm_drv.h"
39#include "exynos_drm_crtc.h" 39#include "exynos_drm_crtc.h"
40#include "exynos_drm_fb.h"
40#include "exynos_drm_plane.h" 41#include "exynos_drm_plane.h"
41#include "exynos_drm_iommu.h" 42#include "exynos_drm_iommu.h"
42 43
43#define MIXER_WIN_NR 3 44#define MIXER_WIN_NR 3
44#define VP_DEFAULT_WIN 2 45#define VP_DEFAULT_WIN 2
45#define CURSOR_WIN 1
46 46
47/* The pixelformats that are natively supported by the mixer. */ 47/* The pixelformats that are natively supported by the mixer. */
48#define MXR_FORMAT_RGB565 4 48#define MXR_FORMAT_RGB565 4
@@ -111,6 +111,28 @@ struct mixer_drv_data {
111 bool has_sclk; 111 bool has_sclk;
112}; 112};
113 113
114static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
115 {
116 .zpos = 0,
117 .type = DRM_PLANE_TYPE_PRIMARY,
118 .pixel_formats = mixer_formats,
119 .num_pixel_formats = ARRAY_SIZE(mixer_formats),
120 .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE,
121 }, {
122 .zpos = 1,
123 .type = DRM_PLANE_TYPE_CURSOR,
124 .pixel_formats = mixer_formats,
125 .num_pixel_formats = ARRAY_SIZE(mixer_formats),
126 .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE,
127 }, {
128 .zpos = 2,
129 .type = DRM_PLANE_TYPE_OVERLAY,
130 .pixel_formats = vp_formats,
131 .num_pixel_formats = ARRAY_SIZE(vp_formats),
132 .capabilities = EXYNOS_DRM_PLANE_CAP_SCALE,
133 },
134};
135
114static const u8 filter_y_horiz_tap8[] = { 136static const u8 filter_y_horiz_tap8[] = {
115 0, -1, -1, -1, -1, -1, -1, -1, 137 0, -1, -1, -1, -1, -1, -1, -1,
116 -1, -1, -1, -1, -1, 0, 0, 0, 138 -1, -1, -1, -1, -1, 0, 0, 0,
@@ -399,10 +421,11 @@ static void mixer_stop(struct mixer_context *ctx)
399static void vp_video_buffer(struct mixer_context *ctx, 421static void vp_video_buffer(struct mixer_context *ctx,
400 struct exynos_drm_plane *plane) 422 struct exynos_drm_plane *plane)
401{ 423{
424 struct exynos_drm_plane_state *state =
425 to_exynos_plane_state(plane->base.state);
426 struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
402 struct mixer_resources *res = &ctx->mixer_res; 427 struct mixer_resources *res = &ctx->mixer_res;
403 struct drm_plane_state *state = plane->base.state; 428 struct drm_framebuffer *fb = state->base.fb;
404 struct drm_framebuffer *fb = state->fb;
405 struct drm_display_mode *mode = &state->crtc->mode;
406 unsigned long flags; 429 unsigned long flags;
407 dma_addr_t luma_addr[2], chroma_addr[2]; 430 dma_addr_t luma_addr[2], chroma_addr[2];
408 bool tiled_mode = false; 431 bool tiled_mode = false;
@@ -422,8 +445,8 @@ static void vp_video_buffer(struct mixer_context *ctx,
422 return; 445 return;
423 } 446 }
424 447
425 luma_addr[0] = plane->dma_addr[0]; 448 luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
426 chroma_addr[0] = plane->dma_addr[1]; 449 chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
427 450
428 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 451 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
429 ctx->interlace = true; 452 ctx->interlace = true;
@@ -459,24 +482,24 @@ static void vp_video_buffer(struct mixer_context *ctx,
459 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | 482 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
460 VP_IMG_VSIZE(fb->height / 2)); 483 VP_IMG_VSIZE(fb->height / 2));
461 484
462 vp_reg_write(res, VP_SRC_WIDTH, plane->src_w); 485 vp_reg_write(res, VP_SRC_WIDTH, state->src.w);
463 vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h); 486 vp_reg_write(res, VP_SRC_HEIGHT, state->src.h);
464 vp_reg_write(res, VP_SRC_H_POSITION, 487 vp_reg_write(res, VP_SRC_H_POSITION,
465 VP_SRC_H_POSITION_VAL(plane->src_x)); 488 VP_SRC_H_POSITION_VAL(state->src.x));
466 vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y); 489 vp_reg_write(res, VP_SRC_V_POSITION, state->src.y);
467 490
468 vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w); 491 vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
469 vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x); 492 vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
470 if (ctx->interlace) { 493 if (ctx->interlace) {
471 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2); 494 vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
472 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2); 495 vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
473 } else { 496 } else {
474 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h); 497 vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h);
475 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y); 498 vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y);
476 } 499 }
477 500
478 vp_reg_write(res, VP_H_RATIO, plane->h_ratio); 501 vp_reg_write(res, VP_H_RATIO, state->h_ratio);
479 vp_reg_write(res, VP_V_RATIO, plane->v_ratio); 502 vp_reg_write(res, VP_V_RATIO, state->v_ratio);
480 503
481 vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE); 504 vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
482 505
@@ -505,37 +528,14 @@ static void mixer_layer_update(struct mixer_context *ctx)
505 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); 528 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
506} 529}
507 530
508static int mixer_setup_scale(const struct exynos_drm_plane *plane,
509 unsigned int *x_ratio, unsigned int *y_ratio)
510{
511 if (plane->crtc_w != plane->src_w) {
512 if (plane->crtc_w == 2 * plane->src_w)
513 *x_ratio = 1;
514 else
515 goto fail;
516 }
517
518 if (plane->crtc_h != plane->src_h) {
519 if (plane->crtc_h == 2 * plane->src_h)
520 *y_ratio = 1;
521 else
522 goto fail;
523 }
524
525 return 0;
526
527fail:
528 DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n");
529 return -ENOTSUPP;
530}
531
532static void mixer_graph_buffer(struct mixer_context *ctx, 531static void mixer_graph_buffer(struct mixer_context *ctx,
533 struct exynos_drm_plane *plane) 532 struct exynos_drm_plane *plane)
534{ 533{
534 struct exynos_drm_plane_state *state =
535 to_exynos_plane_state(plane->base.state);
536 struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
535 struct mixer_resources *res = &ctx->mixer_res; 537 struct mixer_resources *res = &ctx->mixer_res;
536 struct drm_plane_state *state = plane->base.state; 538 struct drm_framebuffer *fb = state->base.fb;
537 struct drm_framebuffer *fb = state->fb;
538 struct drm_display_mode *mode = &state->crtc->mode;
539 unsigned long flags; 539 unsigned long flags;
540 unsigned int win = plane->zpos; 540 unsigned int win = plane->zpos;
541 unsigned int x_ratio = 0, y_ratio = 0; 541 unsigned int x_ratio = 0, y_ratio = 0;
@@ -567,17 +567,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
567 return; 567 return;
568 } 568 }
569 569
570 /* check if mixer supports requested scaling setup */ 570 /* ratio is already checked by common plane code */
571 if (mixer_setup_scale(plane, &x_ratio, &y_ratio)) 571 x_ratio = state->h_ratio == (1 << 15);
572 return; 572 y_ratio = state->v_ratio == (1 << 15);
573 573
574 dst_x_offset = plane->crtc_x; 574 dst_x_offset = state->crtc.x;
575 dst_y_offset = plane->crtc_y; 575 dst_y_offset = state->crtc.y;
576 576
577 /* converting dma address base and source offset */ 577 /* converting dma address base and source offset */
578 dma_addr = plane->dma_addr[0] 578 dma_addr = exynos_drm_fb_dma_addr(fb, 0)
579 + (plane->src_x * fb->bits_per_pixel >> 3) 579 + (state->src.x * fb->bits_per_pixel >> 3)
580 + (plane->src_y * fb->pitches[0]); 580 + (state->src.y * fb->pitches[0]);
581 src_x_offset = 0; 581 src_x_offset = 0;
582 src_y_offset = 0; 582 src_y_offset = 0;
583 583
@@ -605,8 +605,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
605 mixer_reg_write(res, MXR_RESOLUTION, val); 605 mixer_reg_write(res, MXR_RESOLUTION, val);
606 } 606 }
607 607
608 val = MXR_GRP_WH_WIDTH(plane->src_w); 608 val = MXR_GRP_WH_WIDTH(state->src.w);
609 val |= MXR_GRP_WH_HEIGHT(plane->src_h); 609 val |= MXR_GRP_WH_HEIGHT(state->src.h);
610 val |= MXR_GRP_WH_H_SCALE(x_ratio); 610 val |= MXR_GRP_WH_H_SCALE(x_ratio);
611 val |= MXR_GRP_WH_V_SCALE(y_ratio); 611 val |= MXR_GRP_WH_V_SCALE(y_ratio);
612 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); 612 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -1020,43 +1020,12 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1020{ 1020{
1021 struct mixer_context *ctx = crtc->ctx; 1021 struct mixer_context *ctx = crtc->ctx;
1022 struct mixer_resources *res = &ctx->mixer_res; 1022 struct mixer_resources *res = &ctx->mixer_res;
1023 int ret;
1024 1023
1025 if (test_bit(MXR_BIT_POWERED, &ctx->flags)) 1024 if (test_bit(MXR_BIT_POWERED, &ctx->flags))
1026 return; 1025 return;
1027 1026
1028 pm_runtime_get_sync(ctx->dev); 1027 pm_runtime_get_sync(ctx->dev);
1029 1028
1030 ret = clk_prepare_enable(res->mixer);
1031 if (ret < 0) {
1032 DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
1033 return;
1034 }
1035 ret = clk_prepare_enable(res->hdmi);
1036 if (ret < 0) {
1037 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
1038 return;
1039 }
1040 if (ctx->vp_enabled) {
1041 ret = clk_prepare_enable(res->vp);
1042 if (ret < 0) {
1043 DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
1044 ret);
1045 return;
1046 }
1047 if (ctx->has_sclk) {
1048 ret = clk_prepare_enable(res->sclk_mixer);
1049 if (ret < 0) {
1050 DRM_ERROR("Failed to prepare_enable the " \
1051 "sclk_mixer clk [%d]\n",
1052 ret);
1053 return;
1054 }
1055 }
1056 }
1057
1058 set_bit(MXR_BIT_POWERED, &ctx->flags);
1059
1060 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1029 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1061 1030
1062 if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) { 1031 if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
@@ -1064,12 +1033,13 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1064 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); 1033 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
1065 } 1034 }
1066 mixer_win_reset(ctx); 1035 mixer_win_reset(ctx);
1036
1037 set_bit(MXR_BIT_POWERED, &ctx->flags);
1067} 1038}
1068 1039
1069static void mixer_disable(struct exynos_drm_crtc *crtc) 1040static void mixer_disable(struct exynos_drm_crtc *crtc)
1070{ 1041{
1071 struct mixer_context *ctx = crtc->ctx; 1042 struct mixer_context *ctx = crtc->ctx;
1072 struct mixer_resources *res = &ctx->mixer_res;
1073 int i; 1043 int i;
1074 1044
1075 if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) 1045 if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
@@ -1081,17 +1051,9 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
1081 for (i = 0; i < MIXER_WIN_NR; i++) 1051 for (i = 0; i < MIXER_WIN_NR; i++)
1082 mixer_disable_plane(crtc, &ctx->planes[i]); 1052 mixer_disable_plane(crtc, &ctx->planes[i]);
1083 1053
1084 clear_bit(MXR_BIT_POWERED, &ctx->flags); 1054 pm_runtime_put(ctx->dev);
1085 1055
1086 clk_disable_unprepare(res->hdmi); 1056 clear_bit(MXR_BIT_POWERED, &ctx->flags);
1087 clk_disable_unprepare(res->mixer);
1088 if (ctx->vp_enabled) {
1089 clk_disable_unprepare(res->vp);
1090 if (ctx->has_sclk)
1091 clk_disable_unprepare(res->sclk_mixer);
1092 }
1093
1094 pm_runtime_put_sync(ctx->dev);
1095} 1057}
1096 1058
1097/* Only valid for Mixer version 16.0.33.0 */ 1059/* Only valid for Mixer version 16.0.33.0 */
@@ -1187,30 +1149,19 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1187 struct mixer_context *ctx = dev_get_drvdata(dev); 1149 struct mixer_context *ctx = dev_get_drvdata(dev);
1188 struct drm_device *drm_dev = data; 1150 struct drm_device *drm_dev = data;
1189 struct exynos_drm_plane *exynos_plane; 1151 struct exynos_drm_plane *exynos_plane;
1190 unsigned int zpos; 1152 unsigned int i;
1191 int ret; 1153 int ret;
1192 1154
1193 ret = mixer_initialize(ctx, drm_dev); 1155 ret = mixer_initialize(ctx, drm_dev);
1194 if (ret) 1156 if (ret)
1195 return ret; 1157 return ret;
1196 1158
1197 for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) { 1159 for (i = 0; i < MIXER_WIN_NR; i++) {
1198 enum drm_plane_type type; 1160 if (i == VP_DEFAULT_WIN && !ctx->vp_enabled)
1199 const uint32_t *formats; 1161 continue;
1200 unsigned int fcount;
1201
1202 if (zpos < VP_DEFAULT_WIN) {
1203 formats = mixer_formats;
1204 fcount = ARRAY_SIZE(mixer_formats);
1205 } else {
1206 formats = vp_formats;
1207 fcount = ARRAY_SIZE(vp_formats);
1208 }
1209 1162
1210 type = exynos_plane_get_type(zpos, CURSOR_WIN); 1163 ret = exynos_plane_init(drm_dev, &ctx->planes[i],
1211 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 1164 1 << ctx->pipe, &plane_configs[i]);
1212 1 << ctx->pipe, type, formats, fcount,
1213 zpos);
1214 if (ret) 1165 if (ret)
1215 return ret; 1166 return ret;
1216 } 1167 }
@@ -1293,10 +1244,70 @@ static int mixer_remove(struct platform_device *pdev)
1293 return 0; 1244 return 0;
1294} 1245}
1295 1246
1247#ifdef CONFIG_PM_SLEEP
1248static int exynos_mixer_suspend(struct device *dev)
1249{
1250 struct mixer_context *ctx = dev_get_drvdata(dev);
1251 struct mixer_resources *res = &ctx->mixer_res;
1252
1253 clk_disable_unprepare(res->hdmi);
1254 clk_disable_unprepare(res->mixer);
1255 if (ctx->vp_enabled) {
1256 clk_disable_unprepare(res->vp);
1257 if (ctx->has_sclk)
1258 clk_disable_unprepare(res->sclk_mixer);
1259 }
1260
1261 return 0;
1262}
1263
1264static int exynos_mixer_resume(struct device *dev)
1265{
1266 struct mixer_context *ctx = dev_get_drvdata(dev);
1267 struct mixer_resources *res = &ctx->mixer_res;
1268 int ret;
1269
1270 ret = clk_prepare_enable(res->mixer);
1271 if (ret < 0) {
1272 DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
1273 return ret;
1274 }
1275 ret = clk_prepare_enable(res->hdmi);
1276 if (ret < 0) {
1277 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
1278 return ret;
1279 }
1280 if (ctx->vp_enabled) {
1281 ret = clk_prepare_enable(res->vp);
1282 if (ret < 0) {
1283 DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
1284 ret);
1285 return ret;
1286 }
1287 if (ctx->has_sclk) {
1288 ret = clk_prepare_enable(res->sclk_mixer);
1289 if (ret < 0) {
1290 DRM_ERROR("Failed to prepare_enable the " \
1291 "sclk_mixer clk [%d]\n",
1292 ret);
1293 return ret;
1294 }
1295 }
1296 }
1297
1298 return 0;
1299}
1300#endif
1301
1302static const struct dev_pm_ops exynos_mixer_pm_ops = {
1303 SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
1304};
1305
1296struct platform_driver mixer_driver = { 1306struct platform_driver mixer_driver = {
1297 .driver = { 1307 .driver = {
1298 .name = "exynos-mixer", 1308 .name = "exynos-mixer",
1299 .owner = THIS_MODULE, 1309 .owner = THIS_MODULE,
1310 .pm = &exynos_mixer_pm_ops,
1300 .of_match_table = mixer_match_types, 1311 .of_match_table = mixer_match_types,
1301 }, 1312 },
1302 .probe = mixer_probe, 1313 .probe = mixer_probe,
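Two things happen in the mixer diff above: the per-window format/type selection collapses into the static plane_configs[] table, and the 2x-only mixer_setup_scale() helper disappears because the common plane code now pre-computes h_ratio/v_ratio. Those ratios appear to be 16.16 fixed point (source over destination), so comparing against 1 << 15 tests for exactly 2x upscaling, the only graphics-window scale factor the hardware supports. A sketch of that check, under the stated fixed-point assumption:

    /* Assumed: ratio = (src << 16) / dst, i.e. 1 << 16 is 1:1. */
    static inline bool is_2x_upscale(unsigned int ratio)
    {
            /* src is half of dst => destination is twice the source. */
            return ratio == (1 << 15);
    }

    /* Usage mirroring the diff: */
    x_ratio = is_2x_upscale(state->h_ratio);
    y_ratio = is_2x_upscale(state->v_ratio);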
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 9ad592707aaf..4704a993cbb7 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -273,12 +273,12 @@
273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0) 273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
274 274
275/* SYSCON. GSCBLK_CFG */ 275/* SYSCON. GSCBLK_CFG */
276#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224) 276#define SYSREG_GSCBLK_CFG1 0x0224
277#define GSC_BLK_DISP1WB_DEST(x) (x << 10) 277#define GSC_BLK_DISP1WB_DEST(x) (x << 10)
278#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x)) 278#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x))
279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x)) 279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x))
280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x)) 280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x))
281#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000) 281#define SYSREG_GSCBLK_CFG2 0x2000
282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x)) 282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
283 283
284#endif /* EXYNOS_REGS_GSC_H_ */ 284#endif /* EXYNOS_REGS_GSC_H_ */
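Dropping the S3C_VA_SYS base turns SYSREG_GSCBLK_CFG1/2 from absolute virtual addresses into plain offsets, which is what a driver needs once it reaches the system controller through a syscon regmap rather than a statically mapped region. Whether the GScaler driver does exactly that is not visible in this hunk, so the lookup below is purely illustrative (the function name and "samsung,sysreg" phandle property are assumptions):

    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    static int gsc_route_writeback(struct device *dev, unsigned int id)
    {
            struct regmap *sysreg;

            /* Resolve the syscon node referenced from our DT node. */
            sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                     "samsung,sysreg");
            if (IS_ERR(sysreg))
                    return PTR_ERR(sysreg);

            /* Offset-relative access; no ioremap of the block needed. */
            return regmap_update_bits(sysreg, SYSREG_GSCBLK_CFG1,
                                      GSC_BLK_GSCL_WB_IN_SRC_SEL(id),
                                      GSC_BLK_GSCL_WB_IN_SRC_SEL(id));
    }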
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 82a3d311e164..d8ab8f0af10c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -175,7 +175,7 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
175 175
176 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); 176 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
177 ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL, 177 ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
178 &fsl_dcu_drm_crtc_funcs); 178 &fsl_dcu_drm_crtc_funcs, NULL);
179 if (ret < 0) 179 if (ret < 0)
180 return ret; 180 return ret;
181 181
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 51daaea40b4d..4b13cf919575 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -249,7 +249,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
249 &fsl_dcu_drm_plane_funcs, 249 &fsl_dcu_drm_plane_funcs,
250 fsl_dcu_drm_plane_formats, 250 fsl_dcu_drm_plane_formats,
251 ARRAY_SIZE(fsl_dcu_drm_plane_formats), 251 ARRAY_SIZE(fsl_dcu_drm_plane_formats),
252 DRM_PLANE_TYPE_PRIMARY); 252 DRM_PLANE_TYPE_PRIMARY, NULL);
253 if (ret) { 253 if (ret) {
254 kfree(primary); 254 kfree(primary);
255 primary = NULL; 255 primary = NULL;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index fe8ab5da04fb..8780deba5e8a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -57,7 +57,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
57 57
58 encoder->possible_crtcs = 1; 58 encoder->possible_crtcs = 1;
59 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, 59 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
60 DRM_MODE_ENCODER_LVDS); 60 DRM_MODE_ENCODER_LVDS, NULL);
61 if (ret < 0) 61 if (ret < 0)
62 return ret; 62 return ret;
63 63
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3531f90e53d0..8745971a7680 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -619,6 +619,8 @@ const struct psb_ops cdv_chip_ops = {
619 .init_pm = cdv_init_pm, 619 .init_pm = cdv_init_pm,
620 .save_regs = cdv_save_display_registers, 620 .save_regs = cdv_save_display_registers,
621 .restore_regs = cdv_restore_display_registers, 621 .restore_regs = cdv_restore_display_registers,
622 .save_crtc = gma_crtc_save,
623 .restore_crtc = gma_crtc_restore,
622 .power_down = cdv_power_down, 624 .power_down = cdv_power_down,
623 .power_up = cdv_power_up, 625 .power_up = cdv_power_up,
624 .update_wm = cdv_update_wm, 626 .update_wm = cdv_update_wm,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 248c33a35ebf..d0717a85c7ec 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -273,7 +273,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
273 273
274 encoder = &gma_encoder->base; 274 encoder = &gma_encoder->base;
275 drm_encoder_init(dev, encoder, 275 drm_encoder_init(dev, encoder,
276 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); 276 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
277 277
278 gma_connector_attach_encoder(gma_connector, gma_encoder); 278 gma_connector_attach_encoder(gma_connector, gma_encoder);
279 279
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 7d47b3d5cc0d..6126546295e9 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -983,8 +983,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
983}; 983};
984 984
985const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 985const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
986 .save = gma_crtc_save,
987 .restore = gma_crtc_restore,
988 .cursor_set = gma_crtc_cursor_set, 986 .cursor_set = gma_crtc_cursor_set,
989 .cursor_move = gma_crtc_cursor_move, 987 .cursor_move = gma_crtc_cursor_move,
990 .gamma_set = gma_crtc_gamma_set, 988 .gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 17cea400ae32..7bb1f1aff932 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -2020,7 +2020,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
2020 encoder = &gma_encoder->base; 2020 encoder = &gma_encoder->base;
2021 2021
2022 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); 2022 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
2023 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); 2023 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
2024 DRM_MODE_ENCODER_TMDS, NULL);
2024 2025
2025 gma_connector_attach_encoder(gma_connector, gma_encoder); 2026 gma_connector_attach_encoder(gma_connector, gma_encoder);
2026 2027
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 6b1d3340ba14..ddf2d7700759 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -270,8 +270,6 @@ static const struct drm_connector_helper_funcs
270 270
271static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { 271static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
272 .dpms = drm_helper_connector_dpms, 272 .dpms = drm_helper_connector_dpms,
273 .save = cdv_hdmi_save,
274 .restore = cdv_hdmi_restore,
275 .detect = cdv_hdmi_detect, 273 .detect = cdv_hdmi_detect,
276 .fill_modes = drm_helper_probe_single_connector_modes, 274 .fill_modes = drm_helper_probe_single_connector_modes,
277 .set_property = cdv_hdmi_set_property, 275 .set_property = cdv_hdmi_set_property,
@@ -306,13 +304,16 @@ void cdv_hdmi_init(struct drm_device *dev,
306 304
307 connector = &gma_connector->base; 305 connector = &gma_connector->base;
308 connector->polled = DRM_CONNECTOR_POLL_HPD; 306 connector->polled = DRM_CONNECTOR_POLL_HPD;
307 gma_connector->save = cdv_hdmi_save;
308 gma_connector->restore = cdv_hdmi_restore;
309
309 encoder = &gma_encoder->base; 310 encoder = &gma_encoder->base;
310 drm_connector_init(dev, connector, 311 drm_connector_init(dev, connector,
311 &cdv_hdmi_connector_funcs, 312 &cdv_hdmi_connector_funcs,
312 DRM_MODE_CONNECTOR_DVID); 313 DRM_MODE_CONNECTOR_DVID);
313 314
314 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 315 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
315 DRM_MODE_ENCODER_TMDS); 316 DRM_MODE_ENCODER_TMDS, NULL);
316 317
317 gma_connector_attach_encoder(gma_connector, gma_encoder); 318 gma_connector_attach_encoder(gma_connector, gma_encoder);
318 gma_encoder->type = INTEL_OUTPUT_HDMI; 319 gma_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 211069b2b951..813ef23a8054 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -530,8 +530,6 @@ static const struct drm_connector_helper_funcs
530 530
531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { 531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
532 .dpms = drm_helper_connector_dpms, 532 .dpms = drm_helper_connector_dpms,
533 .save = cdv_intel_lvds_save,
534 .restore = cdv_intel_lvds_restore,
535 .detect = cdv_intel_lvds_detect, 533 .detect = cdv_intel_lvds_detect,
536 .fill_modes = drm_helper_probe_single_connector_modes, 534 .fill_modes = drm_helper_probe_single_connector_modes,
537 .set_property = cdv_intel_lvds_set_property, 535 .set_property = cdv_intel_lvds_set_property,
@@ -643,6 +641,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
643 gma_encoder->dev_priv = lvds_priv; 641 gma_encoder->dev_priv = lvds_priv;
644 642
645 connector = &gma_connector->base; 643 connector = &gma_connector->base;
644 gma_connector->save = cdv_intel_lvds_save;
645 gma_connector->restore = cdv_intel_lvds_restore;
646 encoder = &gma_encoder->base; 646 encoder = &gma_encoder->base;
647 647
648 648
@@ -652,7 +652,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
652 652
653 drm_encoder_init(dev, encoder, 653 drm_encoder_init(dev, encoder,
654 &cdv_intel_lvds_enc_funcs, 654 &cdv_intel_lvds_enc_funcs,
655 DRM_MODE_ENCODER_LVDS); 655 DRM_MODE_ENCODER_LVDS, NULL);
656 656
657 657
658 gma_connector_attach_encoder(gma_connector, gma_encoder); 658 gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2eaf1b31c7bd..ee95c03a8c54 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -241,7 +241,7 @@ static struct fb_ops psbfb_unaccel_ops = {
241 */ 241 */
242static int psb_framebuffer_init(struct drm_device *dev, 242static int psb_framebuffer_init(struct drm_device *dev,
243 struct psb_framebuffer *fb, 243 struct psb_framebuffer *fb,
244 struct drm_mode_fb_cmd2 *mode_cmd, 244 const struct drm_mode_fb_cmd2 *mode_cmd,
245 struct gtt_range *gt) 245 struct gtt_range *gt)
246{ 246{
247 u32 bpp, depth; 247 u32 bpp, depth;
@@ -284,7 +284,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
284 284
285static struct drm_framebuffer *psb_framebuffer_create 285static struct drm_framebuffer *psb_framebuffer_create
286 (struct drm_device *dev, 286 (struct drm_device *dev,
287 struct drm_mode_fb_cmd2 *mode_cmd, 287 const struct drm_mode_fb_cmd2 *mode_cmd,
288 struct gtt_range *gt) 288 struct gtt_range *gt)
289{ 289{
290 struct psb_framebuffer *fb; 290 struct psb_framebuffer *fb;
@@ -406,8 +406,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
406 406
407 memset(dev_priv->vram_addr + backing->offset, 0, size); 407 memset(dev_priv->vram_addr + backing->offset, 0, size);
408 408
409 mutex_lock(&dev->struct_mutex);
410
411 info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper); 409 info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
412 if (IS_ERR(info)) { 410 if (IS_ERR(info)) {
413 ret = PTR_ERR(info); 411 ret = PTR_ERR(info);
@@ -463,17 +461,15 @@ static int psbfb_create(struct psb_fbdev *fbdev,
463 dev_dbg(dev->dev, "allocated %dx%d fb\n", 461 dev_dbg(dev->dev, "allocated %dx%d fb\n",
464 psbfb->base.width, psbfb->base.height); 462 psbfb->base.width, psbfb->base.height);
465 463
466 mutex_unlock(&dev->struct_mutex);
467 return 0; 464 return 0;
468out_unref: 465out_unref:
469 if (backing->stolen) 466 if (backing->stolen)
470 psb_gtt_free_range(dev, backing); 467 psb_gtt_free_range(dev, backing);
471 else 468 else
472 drm_gem_object_unreference(&backing->gem); 469 drm_gem_object_unreference_unlocked(&backing->gem);
473 470
474 drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); 471 drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
475out_err1: 472out_err1:
476 mutex_unlock(&dev->struct_mutex);
477 psb_gtt_free_range(dev, backing); 473 psb_gtt_free_range(dev, backing);
478 return ret; 474 return ret;
479} 475}
@@ -488,7 +484,7 @@ out_err1:
488 */ 484 */
489static struct drm_framebuffer *psb_user_framebuffer_create 485static struct drm_framebuffer *psb_user_framebuffer_create
490 (struct drm_device *dev, struct drm_file *filp, 486 (struct drm_device *dev, struct drm_file *filp,
491 struct drm_mode_fb_cmd2 *cmd) 487 const struct drm_mode_fb_cmd2 *cmd)
492{ 488{
493 struct gtt_range *r; 489 struct gtt_range *r;
494 struct drm_gem_object *obj; 490 struct drm_gem_object *obj;
@@ -569,7 +565,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
569 drm_framebuffer_cleanup(&psbfb->base); 565 drm_framebuffer_cleanup(&psbfb->base);
570 566
571 if (psbfb->gtt) 567 if (psbfb->gtt)
572 drm_gem_object_unreference(&psbfb->gtt->gem); 568 drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
573 return 0; 569 return 0;
574} 570}
575 571
@@ -784,12 +780,8 @@ void psb_modeset_cleanup(struct drm_device *dev)
784{ 780{
785 struct drm_psb_private *dev_priv = dev->dev_private; 781 struct drm_psb_private *dev_priv = dev->dev_private;
786 if (dev_priv->modeset) { 782 if (dev_priv->modeset) {
787 mutex_lock(&dev->struct_mutex);
788
789 drm_kms_helper_poll_fini(dev); 783 drm_kms_helper_poll_fini(dev);
790 psb_fbdev_fini(dev); 784 psb_fbdev_fini(dev);
791 drm_mode_config_cleanup(dev); 785 drm_mode_config_cleanup(dev);
792
793 mutex_unlock(&dev->struct_mutex);
794 } 786 }
795} 787}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c707fa6fca85..506224b3a0ad 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -62,15 +62,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
62 int ret = 0; 62 int ret = 0;
63 struct drm_gem_object *obj; 63 struct drm_gem_object *obj;
64 64
65 mutex_lock(&dev->struct_mutex);
66
67 /* GEM does all our handle to object mapping */ 65 /* GEM does all our handle to object mapping */
68 obj = drm_gem_object_lookup(dev, file, handle); 66 obj = drm_gem_object_lookup(dev, file, handle);
69 if (obj == NULL) { 67 if (obj == NULL)
70 ret = -ENOENT; 68 return -ENOENT;
71 goto unlock;
72 }
73 /* What validation is needed here ? */
74 69
75 /* Make it mmapable */ 70 /* Make it mmapable */
76 ret = drm_gem_create_mmap_offset(obj); 71 ret = drm_gem_create_mmap_offset(obj);
@@ -78,9 +73,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
78 goto out; 73 goto out;
79 *offset = drm_vma_node_offset_addr(&obj->vma_node); 74 *offset = drm_vma_node_offset_addr(&obj->vma_node);
80out: 75out:
81 drm_gem_object_unreference(obj); 76 drm_gem_object_unreference_unlocked(obj);
82unlock:
83 mutex_unlock(&dev->struct_mutex);
84 return ret; 77 return ret;
85} 78}
86 79
@@ -130,7 +123,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
130 return ret; 123 return ret;
131 } 124 }
132 /* We have the initial and handle reference but need only one now */ 125 /* We have the initial and handle reference but need only one now */
133 drm_gem_object_unreference(&r->gem); 126 drm_gem_object_unreference_unlocked(&r->gem);
134 *handlep = handle; 127 *handlep = handle;
135 return 0; 128 return 0;
136} 129}
@@ -189,7 +182,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
189 182
190 /* Make sure we don't parallel update on a fault, nor move or remove 183 /* Make sure we don't parallel update on a fault, nor move or remove
191 something from beneath our feet */ 184 something from beneath our feet */
192 mutex_lock(&dev->struct_mutex); 185 mutex_lock(&dev_priv->mmap_mutex);
193 186
194 /* For now the mmap pins the object and it stays pinned. As things 187 /* For now the mmap pins the object and it stays pinned. As things
195 stand that will do us no harm */ 188 stand that will do us no harm */
@@ -215,7 +208,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
215 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 208 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
216 209
217fail: 210fail:
218 mutex_unlock(&dev->struct_mutex); 211 mutex_unlock(&dev_priv->mmap_mutex);
219 switch (ret) { 212 switch (ret) {
220 case 0: 213 case 0:
221 case -ERESTARTSYS: 214 case -ERESTARTSYS:
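The gma500 GEM changes above narrow the locking: psb_gem_fault() now serializes on a dedicated dev_priv->mmap_mutex (initialized in psb_gtt_init below) instead of the device-global dev->struct_mutex, and the unreference calls switch to drm_gem_object_unreference_unlocked(), which must not be called with struct_mutex held and handles its own locking; that is what lets the surrounding mutex_lock/unlock pairs go away. A sketch of the narrowed fault path (my_obj_pfn() is a hypothetical helper standing in for the pin-and-translate work the real handler does):

    /* A driver-private lock covering only fault/mmap state. */
    static int my_gem_fault(struct vm_area_struct *vma,
                            struct vm_fault *vmf)
    {
            struct drm_gem_object *obj = vma->vm_private_data;
            struct my_dev_private *dev_priv = obj->dev->dev_private;
            unsigned long pfn;
            int ret;

            mutex_lock(&dev_priv->mmap_mutex);   /* not struct_mutex */

            pfn = my_obj_pfn(obj, vmf);
            ret = vm_insert_pfn(vma,
                                (unsigned long)vmf->virtual_address,
                                pfn);

            mutex_unlock(&dev_priv->mmap_mutex);
            return ret;
    }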
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 001b450b27b3..ff17af4cfc64 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -349,8 +349,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
349 /* If we didn't get a handle then turn the cursor off */ 349 /* If we didn't get a handle then turn the cursor off */
350 if (!handle) { 350 if (!handle) {
351 temp = CURSOR_MODE_DISABLE; 351 temp = CURSOR_MODE_DISABLE;
352 mutex_lock(&dev->struct_mutex);
353
354 if (gma_power_begin(dev, false)) { 352 if (gma_power_begin(dev, false)) {
355 REG_WRITE(control, temp); 353 REG_WRITE(control, temp);
356 REG_WRITE(base, 0); 354 REG_WRITE(base, 0);
@@ -362,11 +360,9 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
362 gt = container_of(gma_crtc->cursor_obj, 360 gt = container_of(gma_crtc->cursor_obj,
363 struct gtt_range, gem); 361 struct gtt_range, gem);
364 psb_gtt_unpin(gt); 362 psb_gtt_unpin(gt);
365 drm_gem_object_unreference(gma_crtc->cursor_obj); 363 drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
366 gma_crtc->cursor_obj = NULL; 364 gma_crtc->cursor_obj = NULL;
367 } 365 }
368
369 mutex_unlock(&dev->struct_mutex);
370 return 0; 366 return 0;
371 } 367 }
372 368
@@ -376,7 +372,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
376 return -EINVAL; 372 return -EINVAL;
377 } 373 }
378 374
379 mutex_lock(&dev->struct_mutex);
380 obj = drm_gem_object_lookup(dev, file_priv, handle); 375 obj = drm_gem_object_lookup(dev, file_priv, handle);
381 if (!obj) { 376 if (!obj) {
382 ret = -ENOENT; 377 ret = -ENOENT;
@@ -441,17 +436,15 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
441 if (gma_crtc->cursor_obj) { 436 if (gma_crtc->cursor_obj) {
442 gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); 437 gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
443 psb_gtt_unpin(gt); 438 psb_gtt_unpin(gt);
444 drm_gem_object_unreference(gma_crtc->cursor_obj); 439 drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
445 } 440 }
446 441
447 gma_crtc->cursor_obj = obj; 442 gma_crtc->cursor_obj = obj;
448unlock: 443unlock:
449 mutex_unlock(&dev->struct_mutex);
450 return ret; 444 return ret;
451 445
452unref_cursor: 446unref_cursor:
453 drm_gem_object_unreference(obj); 447 drm_gem_object_unreference_unlocked(obj);
454 mutex_unlock(&dev->struct_mutex);
455 return ret; 448 return ret;
456} 449}
457 450
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index ce015db59dc6..8f69225ce2b4 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -425,6 +425,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
425 425
426 if (!resume) { 426 if (!resume) {
427 mutex_init(&dev_priv->gtt_mutex); 427 mutex_init(&dev_priv->gtt_mutex);
428 mutex_init(&dev_priv->mmap_mutex);
428 psb_gtt_alloc(dev); 429 psb_gtt_alloc(dev);
429 } 430 }
430 431
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index 265ad0de44a6..e2ab858122f9 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -546,6 +546,8 @@ const struct psb_ops mdfld_chip_ops = {
546 546
547 .save_regs = mdfld_save_registers, 547 .save_regs = mdfld_save_registers,
548 .restore_regs = mdfld_restore_registers, 548 .restore_regs = mdfld_restore_registers,
549 .save_crtc = gma_crtc_save,
550 .restore_crtc = gma_crtc_restore,
549 .power_down = mdfld_power_down, 551 .power_down = mdfld_power_down,
550 .power_up = mdfld_power_up, 552 .power_up = mdfld_power_up,
551}; 553};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4813e03f5ee..1a1acd3cb049 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -994,7 +994,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
994 drm_encoder_init(dev, 994 drm_encoder_init(dev,
995 encoder, 995 encoder,
996 p_funcs->encoder_funcs, 996 p_funcs->encoder_funcs,
997 DRM_MODE_ENCODER_LVDS); 997 DRM_MODE_ENCODER_LVDS, NULL);
998 drm_encoder_helper_add(encoder, 998 drm_encoder_helper_add(encoder,
999 p_funcs->encoder_helper_funcs); 999 p_funcs->encoder_helper_funcs);
1000 1000
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 89f705c3a5eb..d758f4cc6805 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -405,8 +405,6 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
405/*DSI connector funcs*/ 405/*DSI connector funcs*/
406static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { 406static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
407 .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms, 407 .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
408 .save = mdfld_dsi_connector_save,
409 .restore = mdfld_dsi_connector_restore,
410 .detect = mdfld_dsi_connector_detect, 408 .detect = mdfld_dsi_connector_detect,
411 .fill_modes = drm_helper_probe_single_connector_modes, 409 .fill_modes = drm_helper_probe_single_connector_modes,
412 .set_property = mdfld_dsi_connector_set_property, 410 .set_property = mdfld_dsi_connector_set_property,
@@ -563,6 +561,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
563 561
564 562
565 connector = &dsi_connector->base.base; 563 connector = &dsi_connector->base.base;
564 dsi_connector->base.save = mdfld_dsi_connector_save;
565 dsi_connector->base.restore = mdfld_dsi_connector_restore;
566
566 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, 567 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
567 DRM_MODE_CONNECTOR_LVDS); 568 DRM_MODE_CONNECTOR_LVDS);
568 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); 569 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 368a03ae3010..ba30b43a3412 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -568,6 +568,8 @@ const struct psb_ops oaktrail_chip_ops = {
568 568
569 .save_regs = oaktrail_save_display_registers, 569 .save_regs = oaktrail_save_display_registers,
570 .restore_regs = oaktrail_restore_display_registers, 570 .restore_regs = oaktrail_restore_display_registers,
571 .save_crtc = gma_crtc_save,
572 .restore_crtc = gma_crtc_restore,
571 .power_down = oaktrail_power_down, 573 .power_down = oaktrail_power_down,
572 .power_up = oaktrail_power_up, 574 .power_up = oaktrail_power_up,
573 575
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2310d879cdc2..2d18499d6060 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -654,7 +654,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
654 654
655 drm_encoder_init(dev, encoder, 655 drm_encoder_init(dev, encoder,
656 &oaktrail_hdmi_enc_funcs, 656 &oaktrail_hdmi_enc_funcs,
657 DRM_MODE_ENCODER_TMDS); 657 DRM_MODE_ENCODER_TMDS, NULL);
658 658
659 gma_connector_attach_encoder(gma_connector, gma_encoder); 659 gma_connector_attach_encoder(gma_connector, gma_encoder);
660 660
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 83bbc271bcfb..f7038f12ac76 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -323,7 +323,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
323 DRM_MODE_CONNECTOR_LVDS); 323 DRM_MODE_CONNECTOR_LVDS);
324 324
325 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 325 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
326 DRM_MODE_ENCODER_LVDS); 326 DRM_MODE_ENCODER_LVDS, NULL);
327 327
328 gma_connector_attach_encoder(gma_connector, gma_encoder); 328 gma_connector_attach_encoder(gma_connector, gma_encoder);
329 gma_encoder->type = INTEL_OUTPUT_LVDS; 329 gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 07df7d4eea72..dc0f8527570c 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -181,7 +181,7 @@ static int psb_save_display_registers(struct drm_device *dev)
181{ 181{
182 struct drm_psb_private *dev_priv = dev->dev_private; 182 struct drm_psb_private *dev_priv = dev->dev_private;
183 struct drm_crtc *crtc; 183 struct drm_crtc *crtc;
184 struct drm_connector *connector; 184 struct gma_connector *connector;
185 struct psb_state *regs = &dev_priv->regs.psb; 185 struct psb_state *regs = &dev_priv->regs.psb;
186 186
187 /* Display arbitration control + watermarks */ 187 /* Display arbitration control + watermarks */
@@ -198,12 +198,12 @@ static int psb_save_display_registers(struct drm_device *dev)
198 drm_modeset_lock_all(dev); 198 drm_modeset_lock_all(dev);
199 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 199 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
200 if (drm_helper_crtc_in_use(crtc)) 200 if (drm_helper_crtc_in_use(crtc))
201 crtc->funcs->save(crtc); 201 dev_priv->ops->save_crtc(crtc);
202 } 202 }
203 203
204 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 204 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
205 if (connector->funcs->save) 205 if (connector->save)
206 connector->funcs->save(connector); 206 connector->save(&connector->base);
207 207
208 drm_modeset_unlock_all(dev); 208 drm_modeset_unlock_all(dev);
209 return 0; 209 return 0;
@@ -219,7 +219,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
219{ 219{
220 struct drm_psb_private *dev_priv = dev->dev_private; 220 struct drm_psb_private *dev_priv = dev->dev_private;
221 struct drm_crtc *crtc; 221 struct drm_crtc *crtc;
222 struct drm_connector *connector; 222 struct gma_connector *connector;
223 struct psb_state *regs = &dev_priv->regs.psb; 223 struct psb_state *regs = &dev_priv->regs.psb;
224 224
225 /* Display arbitration + watermarks */ 225 /* Display arbitration + watermarks */
@@ -238,11 +238,11 @@ static int psb_restore_display_registers(struct drm_device *dev)
238 drm_modeset_lock_all(dev); 238 drm_modeset_lock_all(dev);
239 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 239 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
240 if (drm_helper_crtc_in_use(crtc)) 240 if (drm_helper_crtc_in_use(crtc))
241 crtc->funcs->restore(crtc); 241 dev_priv->ops->restore_crtc(crtc);
242 242
243 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 243 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
244 if (connector->funcs->restore) 244 if (connector->restore)
245 connector->funcs->restore(connector); 245 connector->restore(&connector->base);
246 246
247 drm_modeset_unlock_all(dev); 247 drm_modeset_unlock_all(dev);
248 return 0; 248 return 0;
@@ -354,6 +354,8 @@ const struct psb_ops psb_chip_ops = {
354 .init_pm = psb_init_pm, 354 .init_pm = psb_init_pm,
355 .save_regs = psb_save_display_registers, 355 .save_regs = psb_save_display_registers,
356 .restore_regs = psb_restore_display_registers, 356 .restore_regs = psb_restore_display_registers,
357 .save_crtc = gma_crtc_save,
358 .restore_crtc = gma_crtc_restore,
357 .power_down = psb_power_down, 359 .power_down = psb_power_down,
358 .power_up = psb_power_up, 360 .power_up = psb_power_up,
359}; 361};
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e21726ecac32..b74372760d7f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -465,6 +465,8 @@ struct drm_psb_private {
465 struct mutex gtt_mutex; 465 struct mutex gtt_mutex;
466 struct resource *gtt_mem; /* Our PCI resource */ 466 struct resource *gtt_mem; /* Our PCI resource */
467 467
468 struct mutex mmap_mutex;
469
468 struct psb_mmu_driver *mmu; 470 struct psb_mmu_driver *mmu;
469 struct psb_mmu_pd *pf_pd; 471 struct psb_mmu_pd *pf_pd;
470 472
@@ -651,6 +653,8 @@ struct psb_ops {
651 void (*init_pm)(struct drm_device *dev); 653 void (*init_pm)(struct drm_device *dev);
652 int (*save_regs)(struct drm_device *dev); 654 int (*save_regs)(struct drm_device *dev);
653 int (*restore_regs)(struct drm_device *dev); 655 int (*restore_regs)(struct drm_device *dev);
656 void (*save_crtc)(struct drm_crtc *crtc);
657 void (*restore_crtc)(struct drm_crtc *crtc);
654 int (*power_up)(struct drm_device *dev); 658 int (*power_up)(struct drm_device *dev);
655 int (*power_down)(struct drm_device *dev); 659 int (*power_down)(struct drm_device *dev);
656 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc); 660 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
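With the DRM core dropping the legacy .save/.restore hooks from drm_crtc_funcs and drm_connector_funcs, gma500 moves them into its own structures: per-chip CRTC hooks in psb_ops (save_crtc/restore_crtc, wired to gma_crtc_save/gma_crtc_restore in each chip-ops table above) and per-connector hooks in gma_connector. The register save loop then walks the core connector list through the embedded base member, condensed here into a sketch (the wrapper fields match the diff; the function name is illustrative):

    /* Driver-private wrapper carrying the hooks the core dropped. */
    struct gma_connector {
            struct drm_connector base;
            void (*save)(struct drm_connector *connector);
            void (*restore)(struct drm_connector *connector);
    };

    static void gma_save_connectors(struct drm_device *dev)
    {
            struct gma_connector *connector;

            /* Iterate via the embedded base's list head. */
            list_for_each_entry(connector,
                                &dev->mode_config.connector_list,
                                base.head)
                    if (connector->save)
                            connector->save(&connector->base);
    }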
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6659da88fe5b..dcdbc37e55e1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -439,8 +439,6 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
439}; 439};
440 440
441const struct drm_crtc_funcs psb_intel_crtc_funcs = { 441const struct drm_crtc_funcs psb_intel_crtc_funcs = {
442 .save = gma_crtc_save,
443 .restore = gma_crtc_restore,
444 .cursor_set = gma_crtc_cursor_set, 442 .cursor_set = gma_crtc_cursor_set,
445 .cursor_move = gma_crtc_cursor_move, 443 .cursor_move = gma_crtc_cursor_move,
446 .gamma_set = gma_crtc_gamma_set, 444 .gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 860dd2177ca1..2a3b7c684db2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -140,6 +140,9 @@ struct gma_encoder {
140struct gma_connector { 140struct gma_connector {
141 struct drm_connector base; 141 struct drm_connector base;
142 struct gma_encoder *encoder; 142 struct gma_encoder *encoder;
143
144 void (*save)(struct drm_connector *connector);
145 void (*restore)(struct drm_connector *connector);
143}; 146};
144 147
145struct psb_intel_crtc_state { 148struct psb_intel_crtc_state {
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ce0645d0c1e5..b1b93317d054 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -653,8 +653,6 @@ const struct drm_connector_helper_funcs
653 653
654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { 654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
655 .dpms = drm_helper_connector_dpms, 655 .dpms = drm_helper_connector_dpms,
656 .save = psb_intel_lvds_save,
657 .restore = psb_intel_lvds_restore,
658 .detect = psb_intel_lvds_detect, 656 .detect = psb_intel_lvds_detect,
659 .fill_modes = drm_helper_probe_single_connector_modes, 657 .fill_modes = drm_helper_probe_single_connector_modes,
660 .set_property = psb_intel_lvds_set_property, 658 .set_property = psb_intel_lvds_set_property,
@@ -715,6 +713,9 @@ void psb_intel_lvds_init(struct drm_device *dev,
715 gma_encoder->dev_priv = lvds_priv; 713 gma_encoder->dev_priv = lvds_priv;
716 714
717 connector = &gma_connector->base; 715 connector = &gma_connector->base;
716 gma_connector->save = psb_intel_lvds_save;
717 gma_connector->restore = psb_intel_lvds_restore;
718
718 encoder = &gma_encoder->base; 719 encoder = &gma_encoder->base;
719 drm_connector_init(dev, connector, 720 drm_connector_init(dev, connector,
720 &psb_intel_lvds_connector_funcs, 721 &psb_intel_lvds_connector_funcs,
@@ -722,7 +723,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
722 723
723 drm_encoder_init(dev, encoder, 724 drm_encoder_init(dev, encoder,
724 &psb_intel_lvds_enc_funcs, 725 &psb_intel_lvds_enc_funcs,
725 DRM_MODE_ENCODER_LVDS); 726 DRM_MODE_ENCODER_LVDS, NULL);
726 727
727 gma_connector_attach_encoder(gma_connector, gma_encoder); 728 gma_connector_attach_encoder(gma_connector, gma_encoder);
728 gma_encoder->type = INTEL_OUTPUT_LVDS; 729 gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 58529cea575d..e787d376ba67 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1837,8 +1837,6 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1837 1837
1838static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1838static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1839 .dpms = drm_helper_connector_dpms, 1839 .dpms = drm_helper_connector_dpms,
1840 .save = psb_intel_sdvo_save,
1841 .restore = psb_intel_sdvo_restore,
1842 .detect = psb_intel_sdvo_detect, 1840 .detect = psb_intel_sdvo_detect,
1843 .fill_modes = drm_helper_probe_single_connector_modes, 1841 .fill_modes = drm_helper_probe_single_connector_modes,
1844 .set_property = psb_intel_sdvo_set_property, 1842 .set_property = psb_intel_sdvo_set_property,
@@ -2021,6 +2019,9 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2021 connector->base.base.doublescan_allowed = 0; 2019 connector->base.base.doublescan_allowed = 0;
2022 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2020 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2023 2021
2022 connector->base.save = psb_intel_sdvo_save;
2023 connector->base.restore = psb_intel_sdvo_restore;
2024
2024 gma_connector_attach_encoder(&connector->base, &encoder->base); 2025 gma_connector_attach_encoder(&connector->base, &encoder->base);
2025 drm_connector_register(&connector->base.base); 2026 drm_connector_register(&connector->base.base);
2026} 2027}
@@ -2525,7 +2526,8 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2525 /* encoder type will be decided later */ 2526 /* encoder type will be decided later */
2526 gma_encoder = &psb_intel_sdvo->base; 2527 gma_encoder = &psb_intel_sdvo->base;
2527 gma_encoder->type = INTEL_OUTPUT_SDVO; 2528 gma_encoder->type = INTEL_OUTPUT_SDVO;
2528 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0); 2529 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs,
2530 0, NULL);
2529 2531
2530 /* Read the regs to test if we can talk to the device */ 2532 /* Read the regs to test if we can talk to the device */
2531 for (i = 0; i < 0x40; i++) { 2533 for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 896b6aaf8c4d..a46248f0c9c3 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -855,18 +855,6 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
855 priv->dpms = mode; 855 priv->dpms = mode;
856} 856}
857 857
858static void
859tda998x_encoder_save(struct drm_encoder *encoder)
860{
861 DBG("");
862}
863
864static void
865tda998x_encoder_restore(struct drm_encoder *encoder)
866{
867 DBG("");
868}
869
870static bool 858static bool
871tda998x_encoder_mode_fixup(struct drm_encoder *encoder, 859tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
872 const struct drm_display_mode *mode, 860 const struct drm_display_mode *mode,
@@ -1351,8 +1339,6 @@ static void tda998x_encoder_commit(struct drm_encoder *encoder)
1351 1339
1352static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = { 1340static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
1353 .dpms = tda998x_encoder_dpms, 1341 .dpms = tda998x_encoder_dpms,
1354 .save = tda998x_encoder_save,
1355 .restore = tda998x_encoder_restore,
1356 .mode_fixup = tda998x_encoder_mode_fixup, 1342 .mode_fixup = tda998x_encoder_mode_fixup,
1357 .prepare = tda998x_encoder_prepare, 1343 .prepare = tda998x_encoder_prepare,
1358 .commit = tda998x_encoder_commit, 1344 .commit = tda998x_encoder_commit,
@@ -1437,7 +1423,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1437 1423
1438 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs); 1424 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
1439 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, 1425 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
1440 DRM_MODE_ENCODER_TMDS); 1426 DRM_MODE_ENCODER_TMDS, NULL);
1441 if (ret) 1427 if (ret)
1442 goto err_encoder; 1428 goto err_encoder;
1443 1429
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 051eab33e4c7..fcd77b27514d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,6 +10,7 @@ config DRM_I915
10 # the shmem_readpage() which depends upon tmpfs 10 # the shmem_readpage() which depends upon tmpfs
11 select SHMEM 11 select SHMEM
12 select TMPFS 12 select TMPFS
13 select STOP_MACHINE
13 select DRM_KMS_HELPER 14 select DRM_KMS_HELPER
14 select DRM_PANEL 15 select DRM_PANEL
15 select DRM_MIPI_DSI 16 select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 44d290ae1999..0851de07bd13 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -77,6 +77,7 @@ i915-y += dvo_ch7017.o \
77 dvo_tfp410.o \ 77 dvo_tfp410.o \
78 intel_crt.o \ 78 intel_crt.o \
79 intel_ddi.o \ 79 intel_ddi.o \
80 intel_dp_link_training.o \
80 intel_dp_mst.o \ 81 intel_dp_mst.o \
81 intel_dp.o \ 82 intel_dp.o \
82 intel_dsi.o \ 83 intel_dsi.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0e2c1b9648a7..13dea4263554 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -32,7 +32,8 @@ struct intel_dvo_device {
32 const char *name; 32 const char *name;
33 int type; 33 int type;
34 /* DVOA/B/C output register */ 34 /* DVOA/B/C output register */
35 u32 dvo_reg; 35 i915_reg_t dvo_reg;
36 i915_reg_t dvo_srcdim_reg;
36 /* GPIO register used for i2c bus to control this device */ 37 /* GPIO register used for i2c bus to control this device */
37 u32 gpio; 38 u32 gpio;
38 int slave_addr; 39 int slave_addr;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index db58c8d664c2..814d894ed925 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
407 * LRI. 407 * LRI.
408 */ 408 */
409struct drm_i915_reg_descriptor { 409struct drm_i915_reg_descriptor {
410 u32 addr; 410 i915_reg_t addr;
411 u32 mask; 411 u32 mask;
412 u32 value; 412 u32 value;
413}; 413};
414 414
415/* Convenience macro for adding 32-bit registers. */ 415/* Convenience macro for adding 32-bit registers. */
416#define REG32(address, ...) \ 416#define REG32(_reg, ...) \
417 { .addr = address, __VA_ARGS__ } 417 { .addr = (_reg), __VA_ARGS__ }
418 418
419/* 419/*
420 * Convenience macro for adding 64-bit registers. 420 * Convenience macro for adding 64-bit registers.
@@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor {
423 * access commands only allow 32-bit accesses. Hence, we have to include 423 * access commands only allow 32-bit accesses. Hence, we have to include
424 * entries for both halves of the 64-bit registers. 424 * entries for both halves of the 64-bit registers.
425 */ 425 */
426#define REG64(addr) \ 426#define REG64(_reg) \
427 REG32(addr), REG32(addr + sizeof(u32)) 427 { .addr = _reg }, \
428 { .addr = _reg ## _UDW }
429
430#define REG64_IDX(_reg, idx) \
431 { .addr = _reg(idx) }, \
432 { .addr = _reg ## _UDW(idx) }
428 433
429static const struct drm_i915_reg_descriptor gen7_render_regs[] = { 434static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
430 REG64(GPGPU_THREADS_DISPATCHED), 435 REG64(GPGPU_THREADS_DISPATCHED),
@@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
451 REG32(GEN7_GPGPU_DISPATCHDIMX), 456 REG32(GEN7_GPGPU_DISPATCHDIMX),
452 REG32(GEN7_GPGPU_DISPATCHDIMY), 457 REG32(GEN7_GPGPU_DISPATCHDIMY),
453 REG32(GEN7_GPGPU_DISPATCHDIMZ), 458 REG32(GEN7_GPGPU_DISPATCHDIMZ),
454 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), 459 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
455 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), 460 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
456 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), 461 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
457 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)), 462 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
458 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)), 463 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
459 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)), 464 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
460 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)), 465 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
461 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)), 466 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
462 REG32(GEN7_SO_WRITE_OFFSET(0)), 467 REG32(GEN7_SO_WRITE_OFFSET(0)),
463 REG32(GEN7_SO_WRITE_OFFSET(1)), 468 REG32(GEN7_SO_WRITE_OFFSET(1)),
464 REG32(GEN7_SO_WRITE_OFFSET(2)), 469 REG32(GEN7_SO_WRITE_OFFSET(2)),
@@ -592,7 +597,7 @@ static bool check_sorted(int ring_id,
592 bool ret = true; 597 bool ret = true;
593 598
594 for (i = 0; i < reg_count; i++) { 599 for (i = 0; i < reg_count; i++) {
595 u32 curr = reg_table[i].addr; 600 u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
596 601
597 if (curr < previous) { 602 if (curr < previous) {
598 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", 603 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
@@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table,
847 int i; 852 int i;
848 853
849 for (i = 0; i < count; i++) { 854 for (i = 0; i < count; i++) {
850 if (table[i].addr == addr) 855 if (i915_mmio_reg_offset(table[i].addr) == addr)
851 return &table[i]; 856 return &table[i];
852 } 857 }
853 } 858 }
@@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
1023 * to the register. Hence, limit OACONTROL writes to 1028 * to the register. Hence, limit OACONTROL writes to
1024 * only MI_LOAD_REGISTER_IMM commands. 1029 * only MI_LOAD_REGISTER_IMM commands.
1025 */ 1030 */
1026 if (reg_addr == OACONTROL) { 1031 if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
1027 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { 1032 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
1028 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); 1033 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
1029 return false; 1034 return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a3b22bdacd44..a8721fccd8a0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1252,18 +1252,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1252 1252
1253 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : 1253 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
1254 rp_state_cap >> 16) & 0xff; 1254 rp_state_cap >> 16) & 0xff;
1255 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1255 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1256 GEN9_FREQ_SCALER : 1);
1256 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1257 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1257 intel_gpu_freq(dev_priv, max_freq)); 1258 intel_gpu_freq(dev_priv, max_freq));
1258 1259
1259 max_freq = (rp_state_cap & 0xff00) >> 8; 1260 max_freq = (rp_state_cap & 0xff00) >> 8;
1260 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1261 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1262 GEN9_FREQ_SCALER : 1);
1261 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1263 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1262 intel_gpu_freq(dev_priv, max_freq)); 1264 intel_gpu_freq(dev_priv, max_freq));
1263 1265
1264 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : 1266 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
1265 rp_state_cap >> 0) & 0xff; 1267 rp_state_cap >> 0) & 0xff;
1266 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1268 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1269 GEN9_FREQ_SCALER : 1);
1267 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1270 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1268 intel_gpu_freq(dev_priv, max_freq)); 1271 intel_gpu_freq(dev_priv, max_freq));
1269 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1272 seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1523,7 +1526,7 @@ static int gen6_drpc_info(struct seq_file *m)
1523 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1526 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1524 } 1527 }
1525 1528
1526 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1529 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1527 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1530 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1528 1531
1529 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1532 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
@@ -1636,11 +1639,11 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1636 intel_runtime_pm_get(dev_priv); 1639 intel_runtime_pm_get(dev_priv);
1637 mutex_lock(&dev_priv->fbc.lock); 1640 mutex_lock(&dev_priv->fbc.lock);
1638 1641
1639 if (intel_fbc_enabled(dev_priv)) 1642 if (intel_fbc_is_active(dev_priv))
1640 seq_puts(m, "FBC enabled\n"); 1643 seq_puts(m, "FBC enabled\n");
1641 else 1644 else
1642 seq_printf(m, "FBC disabled: %s\n", 1645 seq_printf(m, "FBC disabled: %s\n",
1643 intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason)); 1646 dev_priv->fbc.no_fbc_reason);
1644 1647
1645 if (INTEL_INFO(dev_priv)->gen >= 7) 1648 if (INTEL_INFO(dev_priv)->gen >= 7)
1646 seq_printf(m, "Compressing: %s\n", 1649 seq_printf(m, "Compressing: %s\n",
@@ -1801,7 +1804,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1801 if (ret) 1804 if (ret)
1802 goto out; 1805 goto out;
1803 1806
1804 if (IS_SKYLAKE(dev)) { 1807 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1805 /* Convert GT frequency to 50 HZ units */ 1808 /* Convert GT frequency to 50 HZ units */
1806 min_gpu_freq = 1809 min_gpu_freq =
1807 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1810 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1821,7 +1824,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1821 &ia_freq); 1824 &ia_freq);
1822 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1825 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1823 intel_gpu_freq(dev_priv, (gpu_freq * 1826 intel_gpu_freq(dev_priv, (gpu_freq *
1824 (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))), 1827 (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1828 GEN9_FREQ_SCALER : 1))),
1825 ((ia_freq >> 0) & 0xff) * 100, 1829 ((ia_freq >> 0) & 0xff) * 100,
1826 ((ia_freq >> 8) & 0xff) * 100); 1830 ((ia_freq >> 8) & 0xff) * 100);
1827 } 1831 }
@@ -1865,31 +1869,29 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1865{ 1869{
1866 struct drm_info_node *node = m->private; 1870 struct drm_info_node *node = m->private;
1867 struct drm_device *dev = node->minor->dev; 1871 struct drm_device *dev = node->minor->dev;
1868 struct intel_fbdev *ifbdev = NULL; 1872 struct intel_framebuffer *fbdev_fb = NULL;
1869 struct intel_framebuffer *fb;
1870 struct drm_framebuffer *drm_fb; 1873 struct drm_framebuffer *drm_fb;
1871 1874
1872#ifdef CONFIG_DRM_FBDEV_EMULATION 1875#ifdef CONFIG_DRM_FBDEV_EMULATION
1873 struct drm_i915_private *dev_priv = dev->dev_private; 1876 if (to_i915(dev)->fbdev) {
1874 1877 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1875 ifbdev = dev_priv->fbdev; 1878
1876 fb = to_intel_framebuffer(ifbdev->helper.fb); 1879 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1877 1880 fbdev_fb->base.width,
1878 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1881 fbdev_fb->base.height,
1879 fb->base.width, 1882 fbdev_fb->base.depth,
1880 fb->base.height, 1883 fbdev_fb->base.bits_per_pixel,
1881 fb->base.depth, 1884 fbdev_fb->base.modifier[0],
1882 fb->base.bits_per_pixel, 1885 atomic_read(&fbdev_fb->base.refcount.refcount));
1883 fb->base.modifier[0], 1886 describe_obj(m, fbdev_fb->obj);
1884 atomic_read(&fb->base.refcount.refcount)); 1887 seq_putc(m, '\n');
1885 describe_obj(m, fb->obj); 1888 }
1886 seq_putc(m, '\n');
1887#endif 1889#endif
1888 1890
1889 mutex_lock(&dev->mode_config.fb_lock); 1891 mutex_lock(&dev->mode_config.fb_lock);
1890 drm_for_each_fb(drm_fb, dev) { 1892 drm_for_each_fb(drm_fb, dev) {
1891 fb = to_intel_framebuffer(drm_fb); 1893 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1892 if (ifbdev && &fb->base == ifbdev->helper.fb) 1894 if (fb == fbdev_fb)
1893 continue; 1895 continue;
1894 1896
1895 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1897 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -2402,6 +2404,12 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
2402 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); 2404 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2403 seq_printf(m, "\tversion found: %d.%d\n", 2405 seq_printf(m, "\tversion found: %d.%d\n",
2404 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); 2406 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2407 seq_printf(m, "\theader: offset is %d; size = %d\n",
2408 guc_fw->header_offset, guc_fw->header_size);
2409 seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2410 guc_fw->ucode_offset, guc_fw->ucode_size);
2411 seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2412 guc_fw->rsa_offset, guc_fw->rsa_size);
2405 2413
2406 tmp = I915_READ(GUC_STATUS); 2414 tmp = I915_READ(GUC_STATUS);
2407 2415
@@ -2461,15 +2469,15 @@ static int i915_guc_info(struct seq_file *m, void *data)
2461 if (!HAS_GUC_SCHED(dev_priv->dev)) 2469 if (!HAS_GUC_SCHED(dev_priv->dev))
2462 return 0; 2470 return 0;
2463 2471
2472 if (mutex_lock_interruptible(&dev->struct_mutex))
2473 return 0;
2474
2464 /* Take a local copy of the GuC data, so we can dump it at leisure */ 2475 /* Take a local copy of the GuC data, so we can dump it at leisure */
2465 spin_lock(&dev_priv->guc.host2guc_lock);
2466 guc = dev_priv->guc; 2476 guc = dev_priv->guc;
2467 if (guc.execbuf_client) { 2477 if (guc.execbuf_client)
2468 spin_lock(&guc.execbuf_client->wq_lock);
2469 client = *guc.execbuf_client; 2478 client = *guc.execbuf_client;
2470 spin_unlock(&guc.execbuf_client->wq_lock); 2479
2471 } 2480 mutex_unlock(&dev->struct_mutex);
2472 spin_unlock(&dev_priv->guc.host2guc_lock);
2473 2481
2474 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2482 seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2475 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2483 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
@@ -2550,7 +2558,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
2550 yesno(work_busy(&dev_priv->psr.work.work))); 2558 yesno(work_busy(&dev_priv->psr.work.work)));
2551 2559
2552 if (HAS_DDI(dev)) 2560 if (HAS_DDI(dev))
2553 enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 2561 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2554 else { 2562 else {
2555 for_each_pipe(dev_priv, pipe) { 2563 for_each_pipe(dev_priv, pipe) {
2556 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2564 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -2570,9 +2578,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
2570 } 2578 }
2571 seq_puts(m, "\n"); 2579 seq_puts(m, "\n");
2572 2580
2573 /* CHV PSR has no kind of performance counter */ 2581 /*
2574 if (HAS_DDI(dev)) { 2582 * VLV/CHV PSR has no kind of performance counter
2575 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 2583 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2584 */
2585 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2586 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2576 EDP_PSR_PERF_CNT_MASK; 2587 EDP_PSR_PERF_CNT_MASK;
2577 2588
2578 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2589 seq_printf(m, "Performance_Counter: %u\n", psrperf);
@@ -2673,75 +2684,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2673 return 0; 2684 return 0;
2674} 2685}
2675 2686
2676static const char *power_domain_str(enum intel_display_power_domain domain)
2677{
2678 switch (domain) {
2679 case POWER_DOMAIN_PIPE_A:
2680 return "PIPE_A";
2681 case POWER_DOMAIN_PIPE_B:
2682 return "PIPE_B";
2683 case POWER_DOMAIN_PIPE_C:
2684 return "PIPE_C";
2685 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2686 return "PIPE_A_PANEL_FITTER";
2687 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2688 return "PIPE_B_PANEL_FITTER";
2689 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2690 return "PIPE_C_PANEL_FITTER";
2691 case POWER_DOMAIN_TRANSCODER_A:
2692 return "TRANSCODER_A";
2693 case POWER_DOMAIN_TRANSCODER_B:
2694 return "TRANSCODER_B";
2695 case POWER_DOMAIN_TRANSCODER_C:
2696 return "TRANSCODER_C";
2697 case POWER_DOMAIN_TRANSCODER_EDP:
2698 return "TRANSCODER_EDP";
2699 case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2700 return "PORT_DDI_A_2_LANES";
2701 case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2702 return "PORT_DDI_A_4_LANES";
2703 case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2704 return "PORT_DDI_B_2_LANES";
2705 case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2706 return "PORT_DDI_B_4_LANES";
2707 case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2708 return "PORT_DDI_C_2_LANES";
2709 case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2710 return "PORT_DDI_C_4_LANES";
2711 case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2712 return "PORT_DDI_D_2_LANES";
2713 case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2714 return "PORT_DDI_D_4_LANES";
2715 case POWER_DOMAIN_PORT_DDI_E_2_LANES:
2716 return "PORT_DDI_E_2_LANES";
2717 case POWER_DOMAIN_PORT_DSI:
2718 return "PORT_DSI";
2719 case POWER_DOMAIN_PORT_CRT:
2720 return "PORT_CRT";
2721 case POWER_DOMAIN_PORT_OTHER:
2722 return "PORT_OTHER";
2723 case POWER_DOMAIN_VGA:
2724 return "VGA";
2725 case POWER_DOMAIN_AUDIO:
2726 return "AUDIO";
2727 case POWER_DOMAIN_PLLS:
2728 return "PLLS";
2729 case POWER_DOMAIN_AUX_A:
2730 return "AUX_A";
2731 case POWER_DOMAIN_AUX_B:
2732 return "AUX_B";
2733 case POWER_DOMAIN_AUX_C:
2734 return "AUX_C";
2735 case POWER_DOMAIN_AUX_D:
2736 return "AUX_D";
2737 case POWER_DOMAIN_INIT:
2738 return "INIT";
2739 default:
2740 MISSING_CASE(domain);
2741 return "?";
2742 }
2743}
2744
2745static int i915_power_domain_info(struct seq_file *m, void *unused) 2687static int i915_power_domain_info(struct seq_file *m, void *unused)
2746{ 2688{
2747 struct drm_info_node *node = m->private; 2689 struct drm_info_node *node = m->private;
@@ -2767,7 +2709,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
2767 continue; 2709 continue;
2768 2710
2769 seq_printf(m, " %-23s %d\n", 2711 seq_printf(m, " %-23s %d\n",
2770 power_domain_str(power_domain), 2712 intel_display_power_domain_str(power_domain),
2771 power_domains->domain_use_count[power_domain]); 2713 power_domains->domain_use_count[power_domain]);
2772 } 2714 }
2773 } 2715 }
@@ -2777,6 +2719,51 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
2777 return 0; 2719 return 0;
2778} 2720}
2779 2721
2722static int i915_dmc_info(struct seq_file *m, void *unused)
2723{
2724 struct drm_info_node *node = m->private;
2725 struct drm_device *dev = node->minor->dev;
2726 struct drm_i915_private *dev_priv = dev->dev_private;
2727 struct intel_csr *csr;
2728
2729 if (!HAS_CSR(dev)) {
2730 seq_puts(m, "not supported\n");
2731 return 0;
2732 }
2733
2734 csr = &dev_priv->csr;
2735
2736 intel_runtime_pm_get(dev_priv);
2737
2738 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2739 seq_printf(m, "path: %s\n", csr->fw_path);
2740
2741 if (!csr->dmc_payload)
2742 goto out;
2743
2744 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2745 CSR_VERSION_MINOR(csr->version));
2746
2747 if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
2748 seq_printf(m, "DC3 -> DC5 count: %d\n",
2749 I915_READ(SKL_CSR_DC3_DC5_COUNT));
2750 seq_printf(m, "DC5 -> DC6 count: %d\n",
2751 I915_READ(SKL_CSR_DC5_DC6_COUNT));
2752 } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
2753 seq_printf(m, "DC3 -> DC5 count: %d\n",
2754 I915_READ(BXT_CSR_DC3_DC5_COUNT));
2755 }
2756
2757out:
2758 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2759 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2760 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2761
2762 intel_runtime_pm_put(dev_priv);
2763
2764 return 0;
2765}
2766
2780static void intel_seq_print_mode(struct seq_file *m, int tabs, 2767static void intel_seq_print_mode(struct seq_file *m, int tabs,
2781 struct drm_display_mode *mode) 2768 struct drm_display_mode *mode)
2782{ 2769{
@@ -2944,6 +2931,107 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2944 return cursor_active(dev, pipe); 2931 return cursor_active(dev, pipe);
2945} 2932}
2946 2933
2934static const char *plane_type(enum drm_plane_type type)
2935{
2936 switch (type) {
2937 case DRM_PLANE_TYPE_OVERLAY:
2938 return "OVL";
2939 case DRM_PLANE_TYPE_PRIMARY:
2940 return "PRI";
2941 case DRM_PLANE_TYPE_CURSOR:
2942 return "CUR";
2943 /*
2944 * Deliberately omitting default: to generate compiler warnings
2945 * when a new drm_plane_type gets added.
2946 */
2947 }
2948
2949 return "unknown";
2950}
2951
2952static const char *plane_rotation(unsigned int rotation)
2953{
2954 static char buf[48];
2955 /*
2956 * According to doc only one DRM_ROTATE_ is allowed but this
2957 * will print them all to visualize if the values are misused
2958 */
2959 snprintf(buf, sizeof(buf),
2960 "%s%s%s%s%s%s(0x%08x)",
2961 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
2962 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
2963 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
2964 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
2965 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
2966 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
2967 rotation);
2968
2969 return buf;
2970}
2971
2972static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2973{
2974 struct drm_info_node *node = m->private;
2975 struct drm_device *dev = node->minor->dev;
2976 struct intel_plane *intel_plane;
2977
2978 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2979 struct drm_plane_state *state;
2980 struct drm_plane *plane = &intel_plane->base;
2981
2982 if (!plane->state) {
2983 seq_puts(m, "plane->state is NULL!\n");
2984 continue;
2985 }
2986
2987 state = plane->state;
2988
2989 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2990 plane->base.id,
2991 plane_type(intel_plane->base.type),
2992 state->crtc_x, state->crtc_y,
2993 state->crtc_w, state->crtc_h,
2994 (state->src_x >> 16),
2995 ((state->src_x & 0xffff) * 15625) >> 10,
2996 (state->src_y >> 16),
2997 ((state->src_y & 0xffff) * 15625) >> 10,
2998 (state->src_w >> 16),
2999 ((state->src_w & 0xffff) * 15625) >> 10,
3000 (state->src_h >> 16),
3001 ((state->src_h & 0xffff) * 15625) >> 10,
3002 state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
3003 plane_rotation(state->rotation));
3004 }
3005}
3006
3007static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3008{
3009 struct intel_crtc_state *pipe_config;
3010 int num_scalers = intel_crtc->num_scalers;
3011 int i;
3012
3013 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3014
3015 /* Not all platformas have a scaler */
3016 if (num_scalers) {
3017 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3018 num_scalers,
3019 pipe_config->scaler_state.scaler_users,
3020 pipe_config->scaler_state.scaler_id);
3021
3022 for (i = 0; i < SKL_NUM_SCALERS; i++) {
3023 struct intel_scaler *sc =
3024 &pipe_config->scaler_state.scalers[i];
3025
3026 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3027 i, yesno(sc->in_use), sc->mode);
3028 }
3029 seq_puts(m, "\n");
3030 } else {
3031 seq_puts(m, "\tNo scalers available on this platform\n");
3032 }
3033}
3034
2947static int i915_display_info(struct seq_file *m, void *unused) 3035static int i915_display_info(struct seq_file *m, void *unused)
2948{ 3036{
2949 struct drm_info_node *node = m->private; 3037 struct drm_info_node *node = m->private;
@@ -2963,10 +3051,12 @@ static int i915_display_info(struct seq_file *m, void *unused)
2963 3051
2964 pipe_config = to_intel_crtc_state(crtc->base.state); 3052 pipe_config = to_intel_crtc_state(crtc->base.state);
2965 3053
2966 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", 3054 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2967 crtc->base.base.id, pipe_name(crtc->pipe), 3055 crtc->base.base.id, pipe_name(crtc->pipe),
2968 yesno(pipe_config->base.active), 3056 yesno(pipe_config->base.active),
2969 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 3057 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3058 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3059
2970 if (pipe_config->base.active) { 3060 if (pipe_config->base.active) {
2971 intel_crtc_info(m, crtc); 3061 intel_crtc_info(m, crtc);
2972 3062
@@ -2976,6 +3066,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
2976 x, y, crtc->base.cursor->state->crtc_w, 3066 x, y, crtc->base.cursor->state->crtc_w,
2977 crtc->base.cursor->state->crtc_h, 3067 crtc->base.cursor->state->crtc_h,
2978 crtc->cursor_addr, yesno(active)); 3068 crtc->cursor_addr, yesno(active));
3069 intel_scaler_info(m, crtc);
3070 intel_plane_info(m, crtc);
2979 } 3071 }
2980 3072
2981 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3073 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -3110,7 +3202,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
3110 3202
3111 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 3203 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
3112 for (i = 0; i < dev_priv->workarounds.count; ++i) { 3204 for (i = 0; i < dev_priv->workarounds.count; ++i) {
3113 u32 addr, mask, value, read; 3205 i915_reg_t addr;
3206 u32 mask, value, read;
3114 bool ok; 3207 bool ok;
3115 3208
3116 addr = dev_priv->workarounds.reg[i].addr; 3209 addr = dev_priv->workarounds.reg[i].addr;
@@ -3119,7 +3212,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
3119 read = I915_READ(addr); 3212 read = I915_READ(addr);
3120 ok = (value & mask) == (read & mask); 3213 ok = (value & mask) == (read & mask);
3121 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3214 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3122 addr, value, mask, read, ok ? "OK" : "FAIL"); 3215 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3123 } 3216 }
3124 3217
3125 intel_runtime_pm_put(dev_priv); 3218 intel_runtime_pm_put(dev_priv);
@@ -5023,7 +5116,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
5023 5116
5024 stat->slice_total++; 5117 stat->slice_total++;
5025 5118
5026 if (IS_SKYLAKE(dev)) 5119 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
5027 ss_cnt = INTEL_INFO(dev)->subslice_per_slice; 5120 ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
5028 5121
5029 for (ss = 0; ss < ss_max; ss++) { 5122 for (ss = 0; ss < ss_max; ss++) {
@@ -5236,6 +5329,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
5236 {"i915_energy_uJ", i915_energy_uJ, 0}, 5329 {"i915_energy_uJ", i915_energy_uJ, 0},
5237 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 5330 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
5238 {"i915_power_domain_info", i915_power_domain_info, 0}, 5331 {"i915_power_domain_info", i915_power_domain_info, 0},
5332 {"i915_dmc_info", i915_dmc_info, 0},
5239 {"i915_display_info", i915_display_info, 0}, 5333 {"i915_display_info", i915_display_info, 0},
5240 {"i915_semaphore_status", i915_semaphore_status, 0}, 5334 {"i915_semaphore_status", i915_semaphore_status, 0},
5241 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 5335 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b4741d121a74..a81c76603544 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -28,7 +28,6 @@
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 30
31#include <linux/async.h>
32#include <drm/drmP.h> 31#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
34#include <drm/drm_fb_helper.h> 33#include <drm/drm_fb_helper.h>
@@ -338,7 +337,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
338 i915_resume_switcheroo(dev); 337 i915_resume_switcheroo(dev);
339 dev->switch_power_state = DRM_SWITCH_POWER_ON; 338 dev->switch_power_state = DRM_SWITCH_POWER_ON;
340 } else { 339 } else {
341 pr_err("switched off\n"); 340 pr_info("switched off\n");
342 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 341 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
343 i915_suspend_switcheroo(dev, pmm); 342 i915_suspend_switcheroo(dev, pmm);
344 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 343 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -396,7 +395,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
396 if (ret) 395 if (ret)
397 goto cleanup_vga_switcheroo; 396 goto cleanup_vga_switcheroo;
398 397
399 intel_power_domains_init_hw(dev_priv); 398 intel_power_domains_init_hw(dev_priv, false);
399
400 intel_csr_ucode_init(dev_priv);
400 401
401 ret = intel_irq_install(dev_priv); 402 ret = intel_irq_install(dev_priv);
402 if (ret) 403 if (ret)
@@ -437,7 +438,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
437 * scanning against hotplug events. Hence do this first and ignore the 438 * scanning against hotplug events. Hence do this first and ignore the
438 * tiny window where we will loose hotplug notifactions. 439 * tiny window where we will loose hotplug notifactions.
439 */ 440 */
440 async_schedule(intel_fbdev_initial_config, dev_priv); 441 intel_fbdev_initial_config_async(dev);
441 442
442 drm_kms_helper_poll_init(dev); 443 drm_kms_helper_poll_init(dev);
443 444
@@ -663,7 +664,8 @@ static void gen9_sseu_info_init(struct drm_device *dev)
663 * supports EU power gating on devices with more than one EU 664 * supports EU power gating on devices with more than one EU
664 * pair per subslice. 665 * pair per subslice.
665 */ 666 */
666 info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1)); 667 info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
668 (info->slice_total > 1));
667 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); 669 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
668 info->has_eu_pg = (info->eu_per_subslice > 2); 670 info->has_eu_pg = (info->eu_per_subslice > 2);
669} 671}
@@ -890,7 +892,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
890 spin_lock_init(&dev_priv->mmio_flip_lock); 892 spin_lock_init(&dev_priv->mmio_flip_lock);
891 mutex_init(&dev_priv->sb_lock); 893 mutex_init(&dev_priv->sb_lock);
892 mutex_init(&dev_priv->modeset_restore_lock); 894 mutex_init(&dev_priv->modeset_restore_lock);
893 mutex_init(&dev_priv->csr_lock);
894 mutex_init(&dev_priv->av_mutex); 895 mutex_init(&dev_priv->av_mutex);
895 896
896 intel_pm_setup(dev); 897 intel_pm_setup(dev);
@@ -937,9 +938,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
937 938
938 intel_uncore_init(dev); 939 intel_uncore_init(dev);
939 940
940 /* Load CSR Firmware for SKL */
941 intel_csr_ucode_init(dev);
942
943 ret = i915_gem_gtt_init(dev); 941 ret = i915_gem_gtt_init(dev);
944 if (ret) 942 if (ret)
945 goto out_freecsr; 943 goto out_freecsr;
@@ -1113,7 +1111,7 @@ out_mtrrfree:
1113out_gtt: 1111out_gtt:
1114 i915_global_gtt_cleanup(dev); 1112 i915_global_gtt_cleanup(dev);
1115out_freecsr: 1113out_freecsr:
1116 intel_csr_ucode_fini(dev); 1114 intel_csr_ucode_fini(dev_priv);
1117 intel_uncore_fini(dev); 1115 intel_uncore_fini(dev);
1118 pci_iounmap(dev->pdev, dev_priv->regs); 1116 pci_iounmap(dev->pdev, dev_priv->regs);
1119put_bridge: 1117put_bridge:
@@ -1131,6 +1129,8 @@ int i915_driver_unload(struct drm_device *dev)
1131 struct drm_i915_private *dev_priv = dev->dev_private; 1129 struct drm_i915_private *dev_priv = dev->dev_private;
1132 int ret; 1130 int ret;
1133 1131
1132 intel_fbdev_fini(dev);
1133
1134 i915_audio_component_cleanup(dev_priv); 1134 i915_audio_component_cleanup(dev_priv);
1135 1135
1136 ret = i915_gem_suspend(dev); 1136 ret = i915_gem_suspend(dev);
@@ -1153,8 +1153,6 @@ int i915_driver_unload(struct drm_device *dev)
1153 1153
1154 acpi_video_unregister(); 1154 acpi_video_unregister();
1155 1155
1156 intel_fbdev_fini(dev);
1157
1158 drm_vblank_cleanup(dev); 1156 drm_vblank_cleanup(dev);
1159 1157
1160 intel_modeset_cleanup(dev); 1158 intel_modeset_cleanup(dev);
@@ -1196,7 +1194,7 @@ int i915_driver_unload(struct drm_device *dev)
1196 intel_fbc_cleanup_cfb(dev_priv); 1194 intel_fbc_cleanup_cfb(dev_priv);
1197 i915_gem_cleanup_stolen(dev); 1195 i915_gem_cleanup_stolen(dev);
1198 1196
1199 intel_csr_ucode_fini(dev); 1197 intel_csr_ucode_fini(dev_priv);
1200 1198
1201 intel_teardown_gmbus(dev); 1199 intel_teardown_gmbus(dev);
1202 intel_teardown_mchbar(dev); 1200 intel_teardown_mchbar(dev);
@@ -1264,8 +1262,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1264{ 1262{
1265 struct drm_i915_file_private *file_priv = file->driver_priv; 1263 struct drm_i915_file_private *file_priv = file->driver_priv;
1266 1264
1267 if (file_priv && file_priv->bsd_ring)
1268 file_priv->bsd_ring = NULL;
1269 kfree(file_priv); 1265 kfree(file_priv);
1270} 1266}
1271 1267
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 760e0ce4aa26..e6935f1cb689 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -228,121 +228,83 @@ static const struct intel_device_info intel_sandybridge_m_info = {
228 .need_gfx_hws = 1, .has_hotplug = 1, \ 228 .need_gfx_hws = 1, .has_hotplug = 1, \
229 .has_fbc = 1, \ 229 .has_fbc = 1, \
230 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 230 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
231 .has_llc = 1 231 .has_llc = 1, \
232 GEN_DEFAULT_PIPEOFFSETS, \
233 IVB_CURSOR_OFFSETS
232 234
233static const struct intel_device_info intel_ivybridge_d_info = { 235static const struct intel_device_info intel_ivybridge_d_info = {
234 GEN7_FEATURES, 236 GEN7_FEATURES,
235 .is_ivybridge = 1, 237 .is_ivybridge = 1,
236 GEN_DEFAULT_PIPEOFFSETS,
237 IVB_CURSOR_OFFSETS,
238}; 238};
239 239
240static const struct intel_device_info intel_ivybridge_m_info = { 240static const struct intel_device_info intel_ivybridge_m_info = {
241 GEN7_FEATURES, 241 GEN7_FEATURES,
242 .is_ivybridge = 1, 242 .is_ivybridge = 1,
243 .is_mobile = 1, 243 .is_mobile = 1,
244 GEN_DEFAULT_PIPEOFFSETS,
245 IVB_CURSOR_OFFSETS,
246}; 244};
247 245
248static const struct intel_device_info intel_ivybridge_q_info = { 246static const struct intel_device_info intel_ivybridge_q_info = {
249 GEN7_FEATURES, 247 GEN7_FEATURES,
250 .is_ivybridge = 1, 248 .is_ivybridge = 1,
251 .num_pipes = 0, /* legal, last one wins */ 249 .num_pipes = 0, /* legal, last one wins */
252 GEN_DEFAULT_PIPEOFFSETS,
253 IVB_CURSOR_OFFSETS,
254}; 250};
255 251
252#define VLV_FEATURES \
253 .gen = 7, .num_pipes = 2, \
254 .need_gfx_hws = 1, .has_hotplug = 1, \
255 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
256 .display_mmio_offset = VLV_DISPLAY_BASE, \
257 GEN_DEFAULT_PIPEOFFSETS, \
258 CURSOR_OFFSETS
259
256static const struct intel_device_info intel_valleyview_m_info = { 260static const struct intel_device_info intel_valleyview_m_info = {
257 GEN7_FEATURES, 261 VLV_FEATURES,
258 .is_mobile = 1,
259 .num_pipes = 2,
260 .is_valleyview = 1, 262 .is_valleyview = 1,
261 .display_mmio_offset = VLV_DISPLAY_BASE, 263 .is_mobile = 1,
262 .has_fbc = 0, /* legal, last one wins */
263 .has_llc = 0, /* legal, last one wins */
264 GEN_DEFAULT_PIPEOFFSETS,
265 CURSOR_OFFSETS,
266}; 264};
267 265
268static const struct intel_device_info intel_valleyview_d_info = { 266static const struct intel_device_info intel_valleyview_d_info = {
269 GEN7_FEATURES, 267 VLV_FEATURES,
270 .num_pipes = 2,
271 .is_valleyview = 1, 268 .is_valleyview = 1,
272 .display_mmio_offset = VLV_DISPLAY_BASE,
273 .has_fbc = 0, /* legal, last one wins */
274 .has_llc = 0, /* legal, last one wins */
275 GEN_DEFAULT_PIPEOFFSETS,
276 CURSOR_OFFSETS,
277}; 269};
278 270
271#define HSW_FEATURES \
272 GEN7_FEATURES, \
273 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
274 .has_ddi = 1, \
275 .has_fpga_dbg = 1
276
279static const struct intel_device_info intel_haswell_d_info = { 277static const struct intel_device_info intel_haswell_d_info = {
280 GEN7_FEATURES, 278 HSW_FEATURES,
281 .is_haswell = 1, 279 .is_haswell = 1,
282 .has_ddi = 1,
283 .has_fpga_dbg = 1,
284 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
285 GEN_DEFAULT_PIPEOFFSETS,
286 IVB_CURSOR_OFFSETS,
287}; 280};
288 281
289static const struct intel_device_info intel_haswell_m_info = { 282static const struct intel_device_info intel_haswell_m_info = {
290 GEN7_FEATURES, 283 HSW_FEATURES,
291 .is_haswell = 1, 284 .is_haswell = 1,
292 .is_mobile = 1, 285 .is_mobile = 1,
293 .has_ddi = 1,
294 .has_fpga_dbg = 1,
295 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
296 GEN_DEFAULT_PIPEOFFSETS,
297 IVB_CURSOR_OFFSETS,
298}; 286};
299 287
300static const struct intel_device_info intel_broadwell_d_info = { 288static const struct intel_device_info intel_broadwell_d_info = {
301 .gen = 8, .num_pipes = 3, 289 HSW_FEATURES,
302 .need_gfx_hws = 1, .has_hotplug = 1, 290 .gen = 8,
303 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
304 .has_llc = 1,
305 .has_ddi = 1,
306 .has_fpga_dbg = 1,
307 .has_fbc = 1,
308 GEN_DEFAULT_PIPEOFFSETS,
309 IVB_CURSOR_OFFSETS,
310}; 291};
311 292
312static const struct intel_device_info intel_broadwell_m_info = { 293static const struct intel_device_info intel_broadwell_m_info = {
313 .gen = 8, .is_mobile = 1, .num_pipes = 3, 294 HSW_FEATURES,
314 .need_gfx_hws = 1, .has_hotplug = 1, 295 .gen = 8, .is_mobile = 1,
315 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
316 .has_llc = 1,
317 .has_ddi = 1,
318 .has_fpga_dbg = 1,
319 .has_fbc = 1,
320 GEN_DEFAULT_PIPEOFFSETS,
321 IVB_CURSOR_OFFSETS,
322}; 296};
323 297
324static const struct intel_device_info intel_broadwell_gt3d_info = { 298static const struct intel_device_info intel_broadwell_gt3d_info = {
325 .gen = 8, .num_pipes = 3, 299 HSW_FEATURES,
326 .need_gfx_hws = 1, .has_hotplug = 1, 300 .gen = 8,
327 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 301 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
328 .has_llc = 1,
329 .has_ddi = 1,
330 .has_fpga_dbg = 1,
331 .has_fbc = 1,
332 GEN_DEFAULT_PIPEOFFSETS,
333 IVB_CURSOR_OFFSETS,
334}; 302};
335 303
336static const struct intel_device_info intel_broadwell_gt3m_info = { 304static const struct intel_device_info intel_broadwell_gt3m_info = {
337 .gen = 8, .is_mobile = 1, .num_pipes = 3, 305 HSW_FEATURES,
338 .need_gfx_hws = 1, .has_hotplug = 1, 306 .gen = 8, .is_mobile = 1,
339 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 307 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
340 .has_llc = 1,
341 .has_ddi = 1,
342 .has_fpga_dbg = 1,
343 .has_fbc = 1,
344 GEN_DEFAULT_PIPEOFFSETS,
345 IVB_CURSOR_OFFSETS,
346}; 308};
347 309
348static const struct intel_device_info intel_cherryview_info = { 310static const struct intel_device_info intel_cherryview_info = {
@@ -356,33 +318,21 @@ static const struct intel_device_info intel_cherryview_info = {
356}; 318};
357 319
358static const struct intel_device_info intel_skylake_info = { 320static const struct intel_device_info intel_skylake_info = {
321 HSW_FEATURES,
359 .is_skylake = 1, 322 .is_skylake = 1,
360 .gen = 9, .num_pipes = 3, 323 .gen = 9,
361 .need_gfx_hws = 1, .has_hotplug = 1,
362 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
363 .has_llc = 1,
364 .has_ddi = 1,
365 .has_fpga_dbg = 1,
366 .has_fbc = 1,
367 GEN_DEFAULT_PIPEOFFSETS,
368 IVB_CURSOR_OFFSETS,
369}; 324};
370 325
371static const struct intel_device_info intel_skylake_gt3_info = { 326static const struct intel_device_info intel_skylake_gt3_info = {
327 HSW_FEATURES,
372 .is_skylake = 1, 328 .is_skylake = 1,
373 .gen = 9, .num_pipes = 3, 329 .gen = 9,
374 .need_gfx_hws = 1, .has_hotplug = 1,
375 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 330 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
376 .has_llc = 1,
377 .has_ddi = 1,
378 .has_fpga_dbg = 1,
379 .has_fbc = 1,
380 GEN_DEFAULT_PIPEOFFSETS,
381 IVB_CURSOR_OFFSETS,
382}; 331};
383 332
384static const struct intel_device_info intel_broxton_info = { 333static const struct intel_device_info intel_broxton_info = {
385 .is_preliminary = 1, 334 .is_preliminary = 1,
335 .is_broxton = 1,
386 .gen = 9, 336 .gen = 9,
387 .need_gfx_hws = 1, .has_hotplug = 1, 337 .need_gfx_hws = 1, .has_hotplug = 1,
388 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 338 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -394,50 +344,67 @@ static const struct intel_device_info intel_broxton_info = {
394 IVB_CURSOR_OFFSETS, 344 IVB_CURSOR_OFFSETS,
395}; 345};
396 346
347static const struct intel_device_info intel_kabylake_info = {
348 HSW_FEATURES,
349 .is_preliminary = 1,
350 .is_kabylake = 1,
351 .gen = 9,
352};
353
354static const struct intel_device_info intel_kabylake_gt3_info = {
355 HSW_FEATURES,
356 .is_preliminary = 1,
357 .is_kabylake = 1,
358 .gen = 9,
359 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
360};
361
397/* 362/*
398 * Make sure any device matches here are from most specific to most 363 * Make sure any device matches here are from most specific to most
399 * general. For example, since the Quanta match is based on the subsystem 364 * general. For example, since the Quanta match is based on the subsystem
400 * and subvendor IDs, we need it to come before the more general IVB 365 * and subvendor IDs, we need it to come before the more general IVB
401 * PCI ID matches, otherwise we'll use the wrong info struct above. 366 * PCI ID matches, otherwise we'll use the wrong info struct above.
402 */ 367 */
403#define INTEL_PCI_IDS \ 368static const struct pci_device_id pciidlist[] = {
404 INTEL_I830_IDS(&intel_i830_info), \ 369 INTEL_I830_IDS(&intel_i830_info),
405 INTEL_I845G_IDS(&intel_845g_info), \ 370 INTEL_I845G_IDS(&intel_845g_info),
406 INTEL_I85X_IDS(&intel_i85x_info), \ 371 INTEL_I85X_IDS(&intel_i85x_info),
407 INTEL_I865G_IDS(&intel_i865g_info), \ 372 INTEL_I865G_IDS(&intel_i865g_info),
408 INTEL_I915G_IDS(&intel_i915g_info), \ 373 INTEL_I915G_IDS(&intel_i915g_info),
409 INTEL_I915GM_IDS(&intel_i915gm_info), \ 374 INTEL_I915GM_IDS(&intel_i915gm_info),
410 INTEL_I945G_IDS(&intel_i945g_info), \ 375 INTEL_I945G_IDS(&intel_i945g_info),
411 INTEL_I945GM_IDS(&intel_i945gm_info), \ 376 INTEL_I945GM_IDS(&intel_i945gm_info),
412 INTEL_I965G_IDS(&intel_i965g_info), \ 377 INTEL_I965G_IDS(&intel_i965g_info),
413 INTEL_G33_IDS(&intel_g33_info), \ 378 INTEL_G33_IDS(&intel_g33_info),
414 INTEL_I965GM_IDS(&intel_i965gm_info), \ 379 INTEL_I965GM_IDS(&intel_i965gm_info),
415 INTEL_GM45_IDS(&intel_gm45_info), \ 380 INTEL_GM45_IDS(&intel_gm45_info),
416 INTEL_G45_IDS(&intel_g45_info), \ 381 INTEL_G45_IDS(&intel_g45_info),
417 INTEL_PINEVIEW_IDS(&intel_pineview_info), \ 382 INTEL_PINEVIEW_IDS(&intel_pineview_info),
418 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ 383 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
419 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ 384 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
420 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ 385 INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
421 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ 386 INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
422 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ 387 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
423 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ 388 INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
424 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ 389 INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
425 INTEL_HSW_D_IDS(&intel_haswell_d_info), \ 390 INTEL_HSW_D_IDS(&intel_haswell_d_info),
426 INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 391 INTEL_HSW_M_IDS(&intel_haswell_m_info),
427 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 392 INTEL_VLV_M_IDS(&intel_valleyview_m_info),
428 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ 393 INTEL_VLV_D_IDS(&intel_valleyview_d_info),
429 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \ 394 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
430 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ 395 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
431 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ 396 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
432 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ 397 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
433 INTEL_CHV_IDS(&intel_cherryview_info), \ 398 INTEL_CHV_IDS(&intel_cherryview_info),
434 INTEL_SKL_GT1_IDS(&intel_skylake_info), \ 399 INTEL_SKL_GT1_IDS(&intel_skylake_info),
435 INTEL_SKL_GT2_IDS(&intel_skylake_info), \ 400 INTEL_SKL_GT2_IDS(&intel_skylake_info),
436 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \ 401 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
437 INTEL_BXT_IDS(&intel_broxton_info) 402 INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
438 403 INTEL_BXT_IDS(&intel_broxton_info),
439static const struct pci_device_id pciidlist[] = { /* aka */ 404 INTEL_KBL_GT1_IDS(&intel_kabylake_info),
440 INTEL_PCI_IDS, 405 INTEL_KBL_GT2_IDS(&intel_kabylake_info),
406 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
407 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
441 {0, 0, 0} 408 {0, 0, 0}
442}; 409};
443 410
@@ -463,7 +430,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
463 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 430 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
464 ret = PCH_LPT; 431 ret = PCH_LPT;
465 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 432 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
466 } else if (IS_SKYLAKE(dev)) { 433 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
467 ret = PCH_SPT; 434 ret = PCH_SPT;
468 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 435 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
469 } 436 }
@@ -526,12 +493,15 @@ void intel_detect_pch(struct drm_device *dev)
526 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 493 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
527 dev_priv->pch_type = PCH_SPT; 494 dev_priv->pch_type = PCH_SPT;
528 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 495 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
529 WARN_ON(!IS_SKYLAKE(dev)); 496 WARN_ON(!IS_SKYLAKE(dev) &&
497 !IS_KABYLAKE(dev));
530 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 498 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
531 dev_priv->pch_type = PCH_SPT; 499 dev_priv->pch_type = PCH_SPT;
532 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 500 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
533 WARN_ON(!IS_SKYLAKE(dev)); 501 WARN_ON(!IS_SKYLAKE(dev) &&
534 } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { 502 !IS_KABYLAKE(dev));
503 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
504 (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
535 dev_priv->pch_type = intel_virt_detect_pch(dev); 505 dev_priv->pch_type = intel_virt_detect_pch(dev);
536 } else 506 } else
537 continue; 507 continue;
@@ -570,26 +540,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
570 return true; 540 return true;
571} 541}
572 542
573void i915_firmware_load_error_print(const char *fw_path, int err)
574{
575 DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
576
577 /*
578 * If the reason is not known assume -ENOENT since that's the most
579 * usual failure mode.
580 */
581 if (!err)
582 err = -ENOENT;
583
584 if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
585 return;
586
587 DRM_ERROR(
588 "The driver is built-in, so to load the firmware you need to\n"
589 "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
590 "in your initrd/initramfs image.\n");
591}
592
593static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 543static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
594{ 544{
595 struct drm_device *dev = dev_priv->dev; 545 struct drm_device *dev = dev_priv->dev;
@@ -608,9 +558,16 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
608static int intel_suspend_complete(struct drm_i915_private *dev_priv); 558static int intel_suspend_complete(struct drm_i915_private *dev_priv);
609static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 559static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
610 bool rpm_resume); 560 bool rpm_resume);
611static int skl_resume_prepare(struct drm_i915_private *dev_priv);
612static int bxt_resume_prepare(struct drm_i915_private *dev_priv); 561static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
613 562
563static bool suspend_to_idle(struct drm_i915_private *dev_priv)
564{
565#if IS_ENABLED(CONFIG_ACPI_SLEEP)
566 if (acpi_target_system_state() < ACPI_STATE_S3)
567 return true;
568#endif
569 return false;
570}
614 571
615static int i915_drm_suspend(struct drm_device *dev) 572static int i915_drm_suspend(struct drm_device *dev)
616{ 573{
@@ -663,11 +620,7 @@ static int i915_drm_suspend(struct drm_device *dev)
663 620
664 i915_save_state(dev); 621 i915_save_state(dev);
665 622
666 opregion_target_state = PCI_D3cold; 623 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
667#if IS_ENABLED(CONFIG_ACPI_SLEEP)
668 if (acpi_target_system_state() < ACPI_STATE_S3)
669 opregion_target_state = PCI_D1;
670#endif
671 intel_opregion_notify_adapter(dev, opregion_target_state); 624 intel_opregion_notify_adapter(dev, opregion_target_state);
672 625
673 intel_uncore_forcewake_reset(dev, false); 626 intel_uncore_forcewake_reset(dev, false);
@@ -679,18 +632,35 @@ static int i915_drm_suspend(struct drm_device *dev)
679 632
680 intel_display_set_init_power(dev_priv, false); 633 intel_display_set_init_power(dev_priv, false);
681 634
635 if (HAS_CSR(dev_priv))
636 flush_work(&dev_priv->csr.work);
637
682 return 0; 638 return 0;
683} 639}
684 640
685static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) 641static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
686{ 642{
687 struct drm_i915_private *dev_priv = drm_dev->dev_private; 643 struct drm_i915_private *dev_priv = drm_dev->dev_private;
644 bool fw_csr;
688 int ret; 645 int ret;
689 646
647 fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
648 /*
649 * In case of firmware assisted context save/restore don't manually
650 * deinit the power domains. This also means the CSR/DMC firmware will
651 * stay active, it will power down any HW resources as required and
652 * also enable deeper system power states that would be blocked if the
653 * firmware was inactive.
654 */
655 if (!fw_csr)
656 intel_power_domains_suspend(dev_priv);
657
690 ret = intel_suspend_complete(dev_priv); 658 ret = intel_suspend_complete(dev_priv);
691 659
692 if (ret) { 660 if (ret) {
693 DRM_ERROR("Suspend complete failed: %d\n", ret); 661 DRM_ERROR("Suspend complete failed: %d\n", ret);
662 if (!fw_csr)
663 intel_power_domains_init_hw(dev_priv, true);
694 664
695 return ret; 665 return ret;
696 } 666 }
@@ -711,6 +681,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
711 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) 681 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
712 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 682 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
713 683
684 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
685
714 return 0; 686 return 0;
715} 687}
716 688
@@ -823,8 +795,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
823 * FIXME: This should be solved with a special hdmi sink device or 795 * FIXME: This should be solved with a special hdmi sink device or
824 * similar so that power domains can be employed. 796 * similar so that power domains can be employed.
825 */ 797 */
826 if (pci_enable_device(dev->pdev)) 798 if (pci_enable_device(dev->pdev)) {
827 return -EIO; 799 ret = -EIO;
800 goto out;
801 }
828 802
829 pci_set_master(dev->pdev); 803 pci_set_master(dev->pdev);
830 804
@@ -838,13 +812,16 @@ static int i915_drm_resume_early(struct drm_device *dev)
838 812
839 if (IS_BROXTON(dev)) 813 if (IS_BROXTON(dev))
840 ret = bxt_resume_prepare(dev_priv); 814 ret = bxt_resume_prepare(dev_priv);
841 else if (IS_SKYLAKE(dev_priv))
842 ret = skl_resume_prepare(dev_priv);
843 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 815 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
844 hsw_disable_pc8(dev_priv); 816 hsw_disable_pc8(dev_priv);
845 817
846 intel_uncore_sanitize(dev); 818 intel_uncore_sanitize(dev);
847 intel_power_domains_init_hw(dev_priv); 819
820 if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
821 intel_power_domains_init_hw(dev_priv, true);
822
823out:
824 dev_priv->suspended_to_idle = false;
848 825
849 return ret; 826 return ret;
850} 827}
@@ -1051,15 +1028,6 @@ static int i915_pm_resume(struct device *dev)
1051 return i915_drm_resume(drm_dev); 1028 return i915_drm_resume(drm_dev);
1052} 1029}
1053 1030
1054static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1055{
1056 /* Enabling DC6 is not a hard requirement to enter runtime D3 */
1057
1058 skl_uninit_cdclk(dev_priv);
1059
1060 return 0;
1061}
1062
1063static int hsw_suspend_complete(struct drm_i915_private *dev_priv) 1031static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1064{ 1032{
1065 hsw_enable_pc8(dev_priv); 1033 hsw_enable_pc8(dev_priv);
@@ -1099,16 +1067,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1099 return 0; 1067 return 0;
1100} 1068}
1101 1069
1102static int skl_resume_prepare(struct drm_i915_private *dev_priv)
1103{
1104 struct drm_device *dev = dev_priv->dev;
1105
1106 skl_init_cdclk(dev_priv);
1107 intel_csr_load_program(dev);
1108
1109 return 0;
1110}
1111
1112/* 1070/*
1113 * Save all Gunit registers that may be lost after a D3 and a subsequent 1071 * Save all Gunit registers that may be lost after a D3 and a subsequent
1114 * S0i[R123] transition. The list of registers needing a save/restore is 1072 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1572,8 +1530,6 @@ static int intel_runtime_resume(struct device *device)
1572 1530
1573 if (IS_BROXTON(dev)) 1531 if (IS_BROXTON(dev))
1574 ret = bxt_resume_prepare(dev_priv); 1532 ret = bxt_resume_prepare(dev_priv);
1575 else if (IS_SKYLAKE(dev))
1576 ret = skl_resume_prepare(dev_priv);
1577 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1533 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1578 hsw_disable_pc8(dev_priv); 1534 hsw_disable_pc8(dev_priv);
1579 else if (IS_VALLEYVIEW(dev_priv)) 1535 else if (IS_VALLEYVIEW(dev_priv))
@@ -1616,8 +1572,6 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
 
 	if (IS_BROXTON(dev_priv))
 		ret = bxt_suspend_complete(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		ret = skl_suspend_complete(dev_priv);
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		ret = hsw_suspend_complete(dev_priv);
 	else if (IS_VALLEYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8afda459a26e..f1a8a53e9e30 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -57,7 +57,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20151010"
+#define DRIVER_DATE		"20151204"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -180,15 +180,11 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
-	POWER_DOMAIN_PORT_DDI_A_2_LANES,
-	POWER_DOMAIN_PORT_DDI_A_4_LANES,
-	POWER_DOMAIN_PORT_DDI_B_2_LANES,
-	POWER_DOMAIN_PORT_DDI_B_4_LANES,
-	POWER_DOMAIN_PORT_DDI_C_2_LANES,
-	POWER_DOMAIN_PORT_DDI_C_4_LANES,
-	POWER_DOMAIN_PORT_DDI_D_2_LANES,
-	POWER_DOMAIN_PORT_DDI_D_4_LANES,
-	POWER_DOMAIN_PORT_DDI_E_2_LANES,
+	POWER_DOMAIN_PORT_DDI_A_LANES,
+	POWER_DOMAIN_PORT_DDI_B_LANES,
+	POWER_DOMAIN_PORT_DDI_C_LANES,
+	POWER_DOMAIN_PORT_DDI_D_LANES,
+	POWER_DOMAIN_PORT_DDI_E_LANES,
 	POWER_DOMAIN_PORT_DSI,
 	POWER_DOMAIN_PORT_CRT,
 	POWER_DOMAIN_PORT_OTHER,
@@ -199,6 +195,8 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_AUX_B,
 	POWER_DOMAIN_AUX_C,
 	POWER_DOMAIN_AUX_D,
+	POWER_DOMAIN_GMBUS,
+	POWER_DOMAIN_MODESET,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -288,7 +286,7 @@ struct i915_hotplug {
 	list_for_each_entry(intel_plane,			\
 			    &(dev)->mode_config.plane_list,	\
 			    base.head)				\
-		if ((intel_plane)->pipe == (intel_crtc)->pipe)
+		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
 
 #define for_each_intel_crtc(dev, intel_crtc) \
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
@@ -305,15 +303,15 @@ struct i915_hotplug {
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
-		if ((intel_encoder)->base.crtc == (__crtc))
+		for_each_if ((intel_encoder)->base.crtc == (__crtc))
 
 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
 	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
-		if ((intel_connector)->base.encoder == (__encoder))
+		for_each_if ((intel_connector)->base.encoder == (__encoder))
 
 #define for_each_power_domain(domain, mask)				\
 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
-		if ((1 << (domain)) & (mask))
+		for_each_if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
 struct i915_mm_struct;
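
Note: these iterator macros all gain for_each_if(), which is not defined in this hunk; elsewhere in this series it is presumably the usual drm helper. A standalone sketch of why the inverted form matters, with the assumed definition included so the example compiles:

#include <stdio.h>

/* Assumed definition (not shown in this diff). */
#define for_each_if(condition) if (!(condition)) {} else

/* A filtered iterator in the same style as the macros above. Because
 * the hidden "if" now carries an explicit empty body plus "else", a
 * stray "else" written after the loop body can no longer silently pair
 * with it; the mis-pairing becomes a compile error instead of a bug. */
#define for_each_even(i, n) \
	for ((i) = 0; (i) < (n); (i)++) \
		for_each_if ((i) % 2 == 0)

int main(void)
{
	int i;

	for_each_even(i, 8)
		printf("%d ", i);	/* prints: 0 2 4 6 */
	printf("\n");
	return 0;
}
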
@@ -351,6 +349,8 @@ enum intel_dpll_id {
 	/* hsw/bdw */
 	DPLL_ID_WRPLL1 = 0,
 	DPLL_ID_WRPLL2 = 1,
+	DPLL_ID_SPLL = 2,
+
 	/* skl */
 	DPLL_ID_SKL_DPLL1 = 0,
 	DPLL_ID_SKL_DPLL2 = 1,
@@ -367,6 +367,7 @@ struct intel_dpll_hw_state {
 
 	/* hsw, bdw */
 	uint32_t wrpll;
+	uint32_t spll;
 
 	/* skl */
 	/*
@@ -627,11 +628,9 @@ struct drm_i915_display_funcs {
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
+	int (*compute_pipe_wm)(struct intel_crtc *crtc,
+			       struct drm_atomic_state *state);
 	void (*update_wm)(struct drm_crtc *crtc);
-	void (*update_sprite_wm)(struct drm_plane *plane,
-				 struct drm_crtc *crtc,
-				 uint32_t sprite_width, uint32_t sprite_height,
-				 int pixel_size, bool enable, bool scaled);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
@@ -689,18 +688,18 @@ struct intel_uncore_funcs {
 	void (*force_wake_put)(struct drm_i915_private *dev_priv,
 							enum forcewake_domains domains);
 
-	uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
-	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
-	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
-	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
 
-	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
 				uint8_t val, bool trace);
-	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
 				uint16_t val, bool trace);
-	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
 				uint32_t val, bool trace);
-	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
 				uint64_t val, bool trace);
 };
 
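
Note: the off_t to i915_reg_t conversions in this hunk (and throughout the patch) rely on a typed register wrapper whose definition is not part of this diff. Based on the i915_mmio_reg_offset() calls visible further down, it is presumably shaped like this (an assumption, offered as a sketch):

typedef struct {
	uint32_t reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

Wrapping the offset in a single-member struct makes register offsets a distinct type, so passing a raw integer, or a register value where an offset is expected, no longer compiles.
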
@@ -717,11 +716,11 @@ struct intel_uncore {
 		enum forcewake_domain_id id;
 		unsigned wake_count;
 		struct timer_list timer;
-		u32 reg_set;
+		i915_reg_t reg_set;
 		u32 val_set;
 		u32 val_clear;
-		u32 reg_ack;
-		u32 reg_post;
+		i915_reg_t reg_ack;
+		i915_reg_t reg_post;
 		u32 val_reset;
 	} fw_domain[FW_DOMAIN_ID_COUNT];
 };
@@ -731,25 +730,24 @@ struct intel_uncore {
 	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
 	     (i__) < FW_DOMAIN_ID_COUNT; \
 	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
-		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+		for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
 
 #define for_each_fw_domain(domain__, dev_priv__, i__) \
 	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
 
-enum csr_state {
-	FW_UNINITIALIZED = 0,
-	FW_LOADED,
-	FW_FAILED
-};
+#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
+#define CSR_VERSION_MAJOR(version)	((version) >> 16)
+#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
 
 struct intel_csr {
+	struct work_struct work;
 	const char *fw_path;
 	uint32_t *dmc_payload;
 	uint32_t dmc_fw_size;
+	uint32_t version;
 	uint32_t mmio_count;
-	uint32_t mmioaddr[8];
+	i915_reg_t mmioaddr[8];
 	uint32_t mmiodata[8];
-	enum csr_state state;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
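
Note: the enum csr_state tracking is dropped in favour of a NULL check on dmc_payload, and the firmware version becomes a packed u32. A worked example of the packing, runnable in isolation:

#include <stdint.h>
#include <stdio.h>

/* Same packing as the CSR_VERSION macros introduced above. */
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

int main(void)
{
	uint32_t v = CSR_VERSION(1, 23);	/* (1 << 16) | 23 = 0x00010017 */

	/* prints "DMC fw version: 1.23", the same rendering the error
	 * state dump uses later in this patch */
	printf("DMC fw version: %u.%u\n",
	       CSR_VERSION_MAJOR(v), CSR_VERSION_MINOR(v));
	return 0;
}
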
@@ -767,6 +765,8 @@ struct intel_csr {
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
 	func(is_skylake) sep \
+	func(is_broxton) sep \
+	func(is_kabylake) sep \
 	func(is_preliminary) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \
@@ -902,7 +902,6 @@ struct i915_fbc {
 	/* This is always the inner lock when overlapping with struct_mutex and
 	 * it's the outer lock when overlapping with stolen_lock. */
 	struct mutex lock;
-	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
 	unsigned int possible_framebuffer_bits;
@@ -915,38 +914,21 @@ struct i915_fbc {
 
 	bool false_color;
 
-	/* Tracks whether the HW is actually enabled, not whether the feature is
-	 * possible. */
 	bool enabled;
+	bool active;
 
 	struct intel_fbc_work {
-		struct delayed_work work;
-		struct intel_crtc *crtc;
+		bool scheduled;
+		struct work_struct work;
 		struct drm_framebuffer *fb;
-	} *fbc_work;
-
-	enum no_fbc_reason {
-		FBC_OK, /* FBC is enabled */
-		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
-		FBC_NO_OUTPUT, /* no outputs enabled to compress */
-		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
-		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-		FBC_MODE_TOO_LARGE, /* mode too large for compression */
-		FBC_BAD_PLANE, /* fbc not supported on plane */
-		FBC_NOT_TILED, /* buffer not tiled */
-		FBC_MULTIPLE_PIPES, /* more than one pipe active */
-		FBC_MODULE_PARAM,
-		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
-		FBC_ROTATION, /* rotation is not supported */
-		FBC_IN_DBG_MASTER, /* kernel debugger is active */
-		FBC_BAD_STRIDE, /* stride is not supported */
-		FBC_PIXEL_RATE, /* pixel rate is too big */
-		FBC_PIXEL_FORMAT /* pixel format is invalid */
-	} no_fbc_reason;
-
-	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
-	void (*enable_fbc)(struct intel_crtc *crtc);
-	void (*disable_fbc)(struct drm_i915_private *dev_priv);
+		unsigned long enable_jiffies;
+	} work;
+
+	const char *no_fbc_reason;
+
+	bool (*is_active)(struct drm_i915_private *dev_priv);
+	void (*activate)(struct intel_crtc *crtc);
+	void (*deactivate)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -1016,7 +998,7 @@ struct intel_gmbus {
 	struct i2c_adapter adapter;
 	u32 force_bit;
 	u32 reg0;
-	u32 gpio_reg;
+	i915_reg_t gpio_reg;
 	struct i2c_algo_bit_data bit_algo;
 	struct drm_i915_private *dev_priv;
 };
@@ -1665,7 +1647,7 @@ struct i915_frontbuffer_tracking {
 };
 
 struct i915_wa_reg {
-	u32 addr;
+	i915_reg_t addr;
 	u32 value;
 	/* bitmask representing WA bits */
 	u32 mask;
@@ -1694,6 +1676,13 @@ struct i915_execbuffer_params {
 	struct drm_i915_gem_request *request;
 };
 
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+	unsigned int num_pipes_active;
+	bool sprites_enabled;
+	bool sprites_scaled;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *objects;
@@ -1714,9 +1703,6 @@ struct drm_i915_private {
 
 	struct intel_csr csr;
 
-	/* Display CSR-related protection */
-	struct mutex csr_lock;
-
 	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
 
 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
@@ -1731,6 +1717,8 @@ struct drm_i915_private {
 	/* MMIO base address for MIPI regs */
 	uint32_t mipi_mmio_base;
 
+	uint32_t psr_mmio_base;
+
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
@@ -1896,6 +1884,7 @@ struct drm_i915_private {
 	u32 chv_phy_control;
 
 	u32 suspend_count;
+	bool suspended_to_idle;
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 
@@ -1918,6 +1907,9 @@ struct drm_i915_private {
 	 */
 	uint16_t skl_latency[8];
 
+	/* Committed wm config */
+	struct intel_wm_config config;
+
 	/*
 	 * The skl_wm_values structure is a bit too big for stack
 	 * allocation, so we keep the staging struct where we store
@@ -1976,7 +1968,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
-		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+		for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2432,6 +2424,15 @@ struct drm_i915_cmd_table {
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
 #define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
+#define REVID_FOREVER		0xff
+/*
+ * Return true if revision is in range [since,until] inclusive.
+ *
+ * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
+ */
+#define IS_REVID(p, since, until) \
+	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
+
 #define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
 #define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
@@ -2458,7 +2459,8 @@ struct drm_i915_cmd_table {
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev)	(!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
+#define IS_BROXTON(dev)	(INTEL_INFO(dev)->is_broxton)
+#define IS_KABYLAKE(dev)	(INTEL_INFO(dev)->is_kabylake)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2493,16 +2495,21 @@ struct drm_i915_cmd_table {
 
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
-#define SKL_REVID_A0		(0x0)
-#define SKL_REVID_B0		(0x1)
-#define SKL_REVID_C0		(0x2)
-#define SKL_REVID_D0		(0x3)
-#define SKL_REVID_E0		(0x4)
-#define SKL_REVID_F0		(0x5)
+#define SKL_REVID_A0		0x0
+#define SKL_REVID_B0		0x1
+#define SKL_REVID_C0		0x2
+#define SKL_REVID_D0		0x3
+#define SKL_REVID_E0		0x4
+#define SKL_REVID_F0		0x5
+
+#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
 
-#define BXT_REVID_A0		(0x0)
-#define BXT_REVID_B0		(0x3)
-#define BXT_REVID_C0		(0x9)
+#define BXT_REVID_A0		0x0
+#define BXT_REVID_A1		0x1
+#define BXT_REVID_B0		0x3
+#define BXT_REVID_C0		0x9
+
+#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
 
 /*
  * The genX designation typically refers to the render engine, so render
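
Note: usage sketch for the new revision helpers. The bounds are inclusive per the IS_REVID() comment; the workaround names here are hypothetical placeholders, not functions from this patch:

/* hypothetical callers, illustrating the [since, until] semantics */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0))
	apply_wa_foo(dev);	/* Skylake steppings A0..D0 inclusive */

if (IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
	apply_wa_bar(dev);	/* Broxton B0 and every later stepping */
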
@@ -2574,10 +2581,10 @@ struct drm_i915_cmd_table {
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
 				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
-				 IS_SKYLAKE(dev))
+				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
 				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
-				 IS_SKYLAKE(dev))
+				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 #define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
 #define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
@@ -2601,11 +2608,13 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2637,6 +2646,7 @@ struct i915_params {
 	int panel_use_ssc;
 	int vbt_sdvo_panel_type;
 	int enable_rc6;
+	int enable_dc;
 	int enable_fbc;
 	int enable_ppgtt;
 	int enable_execlists;
@@ -2648,6 +2658,7 @@ struct i915_params {
 	int enable_cmd_parser;
 	/* leave bools at the end to not create holes */
 	bool enable_hangcheck;
+	bool fastboot;
 	bool prefault_disable;
 	bool load_detect_test;
 	bool reset;
@@ -2684,7 +2695,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void i915_firmware_load_error_print(const char *fw_path, int err);
 
 /* intel_hotplug.c */
 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
@@ -2741,17 +2751,47 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 				   uint32_t mask,
 				   uint32_t bits);
-void
-ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
-void
-ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+			    uint32_t interrupt_mask,
+			    uint32_t enabled_irq_mask);
+static inline void
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ilk_update_display_irq(dev_priv, bits, bits);
+}
+static inline void
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ilk_update_display_irq(dev_priv, bits, 0);
+}
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+			 enum pipe pipe,
+			 uint32_t interrupt_mask,
+			 uint32_t enabled_irq_mask);
+static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
+				       enum pipe pipe, uint32_t bits)
+{
+	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
+}
+static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
+					enum pipe pipe, uint32_t bits)
+{
+	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
+}
 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 				  uint32_t interrupt_mask,
 				  uint32_t enabled_irq_mask);
-#define ibx_enable_display_interrupt(dev_priv, bits) \
-	ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
-	ibx_display_interrupt_update((dev_priv), (bits), 0)
+static inline void
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ibx_display_interrupt_update(dev_priv, bits, bits);
+}
+static inline void
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ibx_display_interrupt_update(dev_priv, bits, 0);
+}
+
 
 /* i915_gem.c */
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
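
Note: all three families above follow one idiom: a single masked-update primitive plus thin enable/disable wrappers passing (bits, bits) or (bits, 0). A self-contained model of the mask arithmetic, assuming IMR-style semantics where a set bit masks an interrupt off (the real update functions also write the hardware register):

#include <stdint.h>
#include <stdio.h>

static uint32_t irq_mask = 0xffffffff;	/* stand-in for dev_priv->irq_mask */

static void update_display_irq(uint32_t interrupt_mask,
			       uint32_t enabled_irq_mask)
{
	irq_mask &= ~interrupt_mask;			/* drop old state   */
	irq_mask |= interrupt_mask & ~enabled_irq_mask;	/* re-mask the rest */
}

int main(void)
{
	update_display_irq(0x5, 0x5);	/* "enable": unmask bits 0 and 2 */
	update_display_irq(0x1, 0x0);	/* "disable": mask bit 0 again   */
	printf("0x%08x\n", irq_mask);	/* 0xfffffffb: only bit 2 unmasked */
	return 0;
}

The move from #defines to static inlines also buys type checking: the compiler now insists the first argument really is a struct drm_i915_private pointer.
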
@@ -2991,8 +3031,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined,
-				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 					      const struct i915_ggtt_view *view);
@@ -3347,7 +3385,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
@@ -3430,6 +3467,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
 
+#define __raw_read(x, s) \
+static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
+					     i915_reg_t reg) \
+{ \
+	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
+}
+
+#define __raw_write(x, s) \
+static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
+				       i915_reg_t reg, uint##x##_t val) \
+{ \
+	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
+}
+__raw_read(8, b)
+__raw_read(16, w)
+__raw_read(32, l)
+__raw_read(64, q)
+
+__raw_write(8, b)
+__raw_write(16, w)
+__raw_write(32, l)
+__raw_write(64, q)
+
+#undef __raw_read
+#undef __raw_write
+
 /* These are untraced mmio-accessors that are only valid to be used inside
  * criticial sections inside IRQ handlers where forcewake is explicitly
  * controlled.
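
Note: for reference, __raw_read(32, l) pastes out to exactly the accessor the old open-coded I915_READ_FW() used, just routed through the typed register offset:

/* expansion of __raw_read(32, l) above */
static inline uint32_t __raw_i915_read32(struct drm_i915_private *dev_priv,
					 i915_reg_t reg)
{
	return readl(dev_priv->regs + i915_mmio_reg_offset(reg));
}
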
@@ -3437,8 +3500,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
  * intel_uncore_forcewake_irqunlock().
  */
-#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
-#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
+#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */
@@ -3446,7 +3509,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define INTEL_BROADCAST_RGB_FULL 1
 #define INTEL_BROADCAST_RGB_LIMITED 2
 
-static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
 {
 	if (IS_VALLEYVIEW(dev))
 		return VLV_VGACNTRL;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cf4a1998273..b7d7cecdddf6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	if (i915_gem_request_completed(req, true))
 		return 0;
 
-	timeout_expire = timeout ?
-		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+	timeout_expire = 0;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
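
Note: the rewrite tightens the timeout contract of __i915_wait_request(). A condensed caller-side sketch (argument names assumed from the surrounding code):

/* Contract after this change:
 *   timeout == NULL -> wait indefinitely
 *   *timeout <  0   -> -EINVAL (the old cast to u64 silently turned a
 *                      negative budget into a near-infinite wait)
 *   *timeout == 0   -> -ETIME immediately, i.e. a pure completion poll
 *   *timeout >  0   -> bounded wait
 */
s64 timeout_ns = 0;
int ret = __i915_wait_request(req, reset_counter, true, &timeout_ns, rps);
/* ret is -ETIME here unless the request had already completed */
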
@@ -2737,6 +2745,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 					struct intel_engine_cs *ring)
 {
+	struct intel_ringbuffer *buffer;
+
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
@@ -2752,18 +2762,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * are the ones that keep the context and ringbuffer backing objects
 	 * pinned in place.
 	 */
-	while (!list_empty(&ring->execlist_queue)) {
-		struct drm_i915_gem_request *submit_req;
 
-		submit_req = list_first_entry(&ring->execlist_queue,
-				struct drm_i915_gem_request,
-				execlist_link);
-		list_del(&submit_req->execlist_link);
+	if (i915.enable_execlists) {
+		spin_lock_irq(&ring->execlist_lock);
+		while (!list_empty(&ring->execlist_queue)) {
+			struct drm_i915_gem_request *submit_req;
+
+			submit_req = list_first_entry(&ring->execlist_queue,
+					struct drm_i915_gem_request,
+					execlist_link);
+			list_del(&submit_req->execlist_link);
 
-		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(submit_req);
+			if (submit_req->ctx != ring->default_context)
+				intel_lr_context_unpin(submit_req);
 
-		i915_gem_request_unreference(submit_req);
+			i915_gem_request_unreference(submit_req);
+		}
+		spin_unlock_irq(&ring->execlist_lock);
 	}
 
 	/*
@@ -2782,6 +2797,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
 		i915_gem_request_retire(request);
 	}
+
+	/* Having flushed all requests from all queues, we know that all
+	 * ringbuffers must now be empty. However, since we do not reclaim
+	 * all space when retiring the request (to prevent HEADs colliding
+	 * with rapid ringbuffer wraparound) the amount of available space
+	 * upon reset is less than when we start. Do one more pass over
+	 * all the ringbuffers to reset last_retired_head.
+	 */
+	list_for_each_entry(buffer, &ring->buffers, link) {
+		buffer->last_retired_head = buffer->tail;
+		intel_ring_update_space(buffer);
+	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2922,6 +2949,10 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (!list_empty(&ring->request_list))
 		return;
 
+	/* we probably should sync with hangcheck here, using cancel_work_sync.
+	 * Also locking seems to be fubar here, ring->request_list is protected
+	 * by dev->struct_mutex. */
+
 	intel_mark_idle(dev);
 
 	if (mutex_trylock(&dev->struct_mutex)) {
@@ -3046,7 +3077,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (ret == 0)
 			ret = __i915_wait_request(req[i], reset_counter, true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  file->driver_priv);
+						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
 	}
 	return ret;
@@ -3809,6 +3840,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3825,7 +3857,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 			return -ENODEV;
 
 		level = I915_CACHE_LLC;
@@ -3837,9 +3869,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		return ret;
+		goto rpm_put;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
@@ -3852,6 +3886,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
+rpm_put:
+	intel_runtime_pm_put(dev_priv);
+
 	return ret;
 }
 
@@ -3863,17 +3900,11 @@ unlock:
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined,
-				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
-	if (ret)
-		return ret;
-
 	/* Mark the pin_display early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
@@ -4470,10 +4501,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
+		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
+		    vma->vm == vm)
 			return vma;
 	}
 	return NULL;
@@ -4562,7 +4591,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
 
@@ -4578,10 +4606,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, reg_base + i);
-		intel_ring_emit(ring, remap_info[i/4]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
 	}
 
 	intel_ring_advance(ring);
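
Note: GEN7_L3LOG() is not defined in this diff; judging by the old reg_base arithmetic it presumably folds the slice offset and dword index into a typed register, along these lines (assumption):

/* presumed definition, replacing reg_base = GEN7_L3LOG_BASE + slice * 0x200:
 *   #define GEN7_L3LOG(slice, i) \
 *           _MMIO(GEN7_L3LOG_BASE + (slice) * 0x200 + (i) * 4)
 * which is why the loop now counts dwords (GEN7_L3LOG_SIZE / 4) and can
 * index remap_info[i] directly instead of remap_info[i/4]. */
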
@@ -4749,18 +4777,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (HAS_GUC_UCODE(dev)) {
 		ret = intel_guc_ucode_load(dev);
 		if (ret) {
-			/*
-			 * If we got an error and GuC submission is enabled, map
-			 * the error to -EIO so the GPU will be declared wedged.
-			 * OTOH, if we didn't intend to use the GuC anyway, just
-			 * discard the error and carry on.
-			 */
-			DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
-				  i915.enable_guc_submission ? "" :
-				  " (ignored)");
-			ret = i915.enable_guc_submission ? -EIO : 0;
-			if (ret)
-				goto out;
+			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
+			ret = -EIO;
+			goto out;
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8c688a5f1589..43761c5bcaca 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 	if (!ppgtt)
 		return;
 
-	WARN_ON(!list_empty(&ppgtt->base.active_list));
-
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
 				 mm_list) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
@@ -556,7 +554,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 			if (signaller == ring)
 				continue;
 
-			intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+			intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
 			intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 		}
 	}
@@ -581,7 +579,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 			if (signaller == ring)
 				continue;
 
-			intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+			intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
 			intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 		}
 	}
@@ -925,6 +923,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
 		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
 		break;
+	case I915_CONTEXT_PARAM_GTT_SIZE:
+		if (ctx->ppgtt)
+			args->value = ctx->ppgtt->base.total;
+		else if (to_i915(dev)->mm.aliasing_ppgtt)
+			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+		else
+			args->value = to_i915(dev)->gtt.base.total;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
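
Note: a userspace-side sketch of querying the new parameter. It assumes the matching I915_CONTEXT_PARAM_GTT_SIZE uapi definition from this series plus libdrm's drmIoctl(); treat it as illustrative, not as a tested snippet:

#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int print_gtt_size(int fd, unsigned int ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_GTT_SIZE,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -1;

	/* full ppgtt: the context's own address space size;
	 * otherwise the size of the shared GTT */
	printf("GTT size: %llu bytes\n", (unsigned long long)p.value);
	return 0;
}
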
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6ed7d63a0688..a4c243cec4aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1114,7 +1114,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 
 	for (i = 0; i < 4; i++) {
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
 		intel_ring_emit(ring, 0);
 	}
 
@@ -1241,7 +1241,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit_reg(ring, INSTPM);
 		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
 		intel_ring_advance(ring);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 40a10b25956c..598198543dcd 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg_lo, fence_reg_hi;
+	i915_reg_t fence_reg_lo, fence_reg_hi;
 	int fence_pitch_shift;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	}
 
 	/* check for L-shaped memory aka modified enhanced addressing */
-	if (IS_GEN4(dev)) {
-		uint32_t ddc2 = I915_READ(DCC2);
-
-		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-			dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+	if (IS_GEN4(dev) &&
+	    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+		swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+		swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	}
 
 	if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		 * matching, which was the case for the swizzling required in
 		 * the table above, or from the 1-ch value being less than
 		 * the minimum size of a rank.
+		 *
+		 * Reports indicate that the swizzling actually
+		 * varies depending upon page placement inside the
+		 * channels, i.e. we see swizzled pages where the
+		 * banks of memory are paired and unswizzled on the
+		 * uneven portion, so leave that as unknown.
 		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
+		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
 		}
 	}
 
+	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+		/* Userspace likes to explode if it sees unknown swizzling,
+		 * so lie. We will finish the lie when reporting through
+		 * the get-tiling-ioctl by reporting the physical swizzle
+		 * mode as unknown instead.
+		 *
+		 * As we don't strictly know what the swizzling is, it may be
+		 * bit17 dependent, and so we need to also prevent the pages
+		 * from being moved.
+		 */
+		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	}
+
 	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
 	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 43f35d12b677..1f7e6b9df45d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -24,6 +24,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/stop_machine.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -104,9 +105,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
 	bool has_aliasing_ppgtt;
 	bool has_full_ppgtt;
+	bool has_full_48bit_ppgtt;
 
 	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
 
 	if (intel_vgpu_active(dev))
 		has_full_ppgtt = false; /* emulation is too hard */
@@ -125,6 +128,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	if (enable_ppgtt == 2 && has_full_ppgtt)
 		return 2;
 
+	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
+		return 3;
+
 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
@@ -141,7 +147,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	}
 
 	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
-		return 2;
+		return has_full_48bit_ppgtt ? 3 : 2;
 	else
 		return has_aliasing_ppgtt ? 1 : 0;
 }
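
Note: for orientation, the sanitized value feeds mode macros of roughly this shape (taken as an assumption from the surrounding driver code, not from this hunk), which is why the new level 3 slots in cleanly:

/* 0: GGTT only, 1: aliasing ppgtt, 2: full 32-bit ppgtt,
 * 3: full 48-bit ppgtt (new here: BDW and gen9+, execlists only) */
#define USES_PPGTT(dev)			(i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev)		(i915.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)
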
@@ -661,10 +667,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
 	intel_ring_emit(ring, upper_32_bits(addr));
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
 	intel_ring_emit(ring, lower_32_bits(addr));
 	intel_ring_advance(ring);
 
@@ -904,14 +910,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 	enum vgt_g2v_type msg;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int offset = vgtif_reg(pdp0_lo);
 	int i;
 
 	if (USES_FULL_48BIT_PPGTT(dev)) {
 		u64 daddr = px_dma(&ppgtt->pml4);
 
-		I915_WRITE(offset, lower_32_bits(daddr));
-		I915_WRITE(offset + 4, upper_32_bits(daddr));
+		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 
 		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
 				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
@@ -919,10 +924,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
 			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-			I915_WRITE(offset, lower_32_bits(daddr));
-			I915_WRITE(offset + 4, upper_32_bits(daddr));
-
-			offset += 8;
+			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
+			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
 		}
 
 		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
@@ -1662,9 +1665,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -1699,9 +1702,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -2528,6 +2531,26 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	return 0;
 }
 
+struct ggtt_bind_vma__cb {
+	struct i915_vma *vma;
+	enum i915_cache_level cache_level;
+	u32 flags;
+};
+
+static int ggtt_bind_vma__cb(void *_arg)
+{
+	struct ggtt_bind_vma__cb *arg = _arg;
+	return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags);
+}
+
+static int ggtt_bind_vma__BKL(struct i915_vma *vma,
+			      enum i915_cache_level cache_level,
+			      u32 flags)
+{
+	struct ggtt_bind_vma__cb arg = { vma, cache_level, flags };
+	return stop_machine(ggtt_bind_vma__cb, &arg, NULL);
+}
+
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
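
Note: ggtt_bind_vma__BKL() leans on the generic stop_machine() facility from <linux/stop_machine.h>, included at the top of this file:

/* int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
 *
 * Runs fn(data) on one CPU while every other online CPU spins with
 * interrupts disabled: effectively a system-wide critical section.
 * Passing NULL for cpus lets the core pick the executing CPU. Heavy,
 * but it guarantees nothing can touch the GGTT while the PTEs are
 * being rewritten, which is the point of the Cherryview hook below. */
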
@@ -2995,6 +3018,9 @@ static int gen8_gmch_probe(struct drm_device *dev,
 	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
 	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
+	if (IS_CHERRYVIEW(dev))
+		dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL;
+
 	return ret;
 }
 
@@ -3302,7 +3328,7 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 			  struct drm_i915_gem_object *obj)
 {
-	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
 	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	unsigned int size_pages_uv;
 	struct sg_page_iter sg_iter;
@@ -3534,7 +3560,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
 	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->rotation_info.size;
+		return view->params.rotation_info.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a216397ead52..877c32c78a6a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -156,13 +156,10 @@ struct i915_ggtt_view {
 			u64 offset;
 			unsigned int size;
 		} partial;
+		struct intel_rotation_info rotation_info;
 	} params;
 
 	struct sg_table *pages;
-
-	union {
-		struct intel_rotation_info rotation_info;
-	};
 };
 
 extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -556,7 +553,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
556 553
557 if (a->type != b->type) 554 if (a->type != b->type)
558 return false; 555 return false;
559 if (a->type == I915_GGTT_VIEW_PARTIAL) 556 if (a->type != I915_GGTT_VIEW_NORMAL)
560 return !memcmp(&a->params, &b->params, sizeof(a->params)); 557 return !memcmp(&a->params, &b->params, sizeof(a->params));
561 return true; 558 return true;
562} 559}
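
With rotation_info folded into the params union, every non-normal view now keeps all of its state in one place, which is why i915_ggtt_view_equal() can drop the PARTIAL special case and memcmp the union for any type other than NORMAL. The idiom is only safe if each view type fully initialises its params member, since memcmp sees unused bytes too. A self-contained sketch of the "one union, one memcmp" shape, with type and field names invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum view_type { VIEW_NORMAL, VIEW_ROTATED, VIEW_PARTIAL };

struct view {
	enum view_type type;
	union {
		struct { unsigned long offset; unsigned size; } partial;
		struct { unsigned size; } rotated;
	} params;	/* must be zero-initialised so memcmp is safe */
};

static bool view_equal(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return false;
	/* NORMAL carries no parameters; everything else lives in params. */
	if (a->type != VIEW_NORMAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));
	return true;
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL }, b = { .type = VIEW_PARTIAL };
	a.params.partial.offset = b.params.partial.offset = 4096;
	a.params.partial.size = b.params.partial.size = 16;
	printf("%d\n", view_equal(&a, &b));	/* prints 1 */
	return 0;
}
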
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cdacf3f5b77a..598ed2facf85 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -433,7 +433,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
433 &reserved_size); 433 &reserved_size);
434 break; 434 break;
435 default: 435 default:
436 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) 436 if (IS_BROADWELL(dev_priv) ||
437 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
437 bdw_get_stolen_reserved(dev_priv, &reserved_base, 438 bdw_get_stolen_reserved(dev_priv, &reserved_base,
438 &reserved_size); 439 &reserved_size);
439 else 440 else
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 8a6717cc265c..7410f6c962e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
176 return -EINVAL; 176 return -EINVAL;
177 } 177 }
178 178
179 intel_runtime_pm_get(dev_priv);
180
179 mutex_lock(&dev->struct_mutex); 181 mutex_lock(&dev->struct_mutex);
180 if (obj->pin_display || obj->framebuffer_references) { 182 if (obj->pin_display || obj->framebuffer_references) {
181 ret = -EBUSY; 183 ret = -EBUSY;
@@ -269,6 +271,8 @@ err:
269 drm_gem_object_unreference(&obj->base); 271 drm_gem_object_unreference(&obj->base);
270 mutex_unlock(&dev->struct_mutex); 272 mutex_unlock(&dev->struct_mutex);
271 273
274 intel_runtime_pm_put(dev_priv);
275
272 return ret; 276 return ret;
273} 277}
274 278
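
The tiling change brackets the whole ioctl with intel_runtime_pm_get()/intel_runtime_pm_put(), keeping the device awake while fence registers may be rewritten: the reference is taken before struct_mutex and dropped only after the common unlock at the end, so every exit path passes through the put. The bracketing discipline in isolation (pm_get()/pm_put() here are stand-ins, not the driver's API):

#include <stdio.h>

static int pm_refcount;

static void pm_get(void) { if (pm_refcount++ == 0) printf("device resumed\n"); }
static void pm_put(void) { if (--pm_refcount == 0) printf("device may suspend\n"); }

static int touch_hardware(int arg)
{
	return arg < 0 ? -1 : 0;	/* placeholder for fence programming */
}

static int tiling_ioctl(int arg)
{
	int ret;

	pm_get();			/* hardware must stay awake from here */
	ret = touch_hardware(arg);
	pm_put();			/* success and failure both reach this */
	return ret;
}

int main(void)
{
	printf("%d %d\n", tiling_ioctl(1), tiling_ioctl(-1));
	return 0;
}
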
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2f04e4f2ff35..06ca4082735b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -366,6 +366,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
366 err_printf(m, "Suspend count: %u\n", error->suspend_count); 366 err_printf(m, "Suspend count: %u\n", error->suspend_count);
367 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); 367 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
368 err_printf(m, "IOMMU enabled?: %d\n", error->iommu); 368 err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
369
370 if (HAS_CSR(dev)) {
371 struct intel_csr *csr = &dev_priv->csr;
372
373 err_printf(m, "DMC loaded: %s\n",
374 yesno(csr->dmc_payload != NULL));
375 err_printf(m, "DMC fw version: %d.%d\n",
376 CSR_VERSION_MAJOR(csr->version),
377 CSR_VERSION_MINOR(csr->version));
378 }
379
369 err_printf(m, "EIR: 0x%08x\n", error->eir); 380 err_printf(m, "EIR: 0x%08x\n", error->eir);
370 err_printf(m, "IER: 0x%08x\n", error->ier); 381 err_printf(m, "IER: 0x%08x\n", error->ier);
371 if (INTEL_INFO(dev)->gen >= 8) { 382 if (INTEL_INFO(dev)->gen >= 8) {
@@ -862,7 +873,7 @@ static void i915_record_ring_state(struct drm_device *dev,
862 struct drm_i915_private *dev_priv = dev->dev_private; 873 struct drm_i915_private *dev_priv = dev->dev_private;
863 874
864 if (INTEL_INFO(dev)->gen >= 6) { 875 if (INTEL_INFO(dev)->gen >= 6) {
865 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); 876 ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
866 ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); 877 ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
867 if (INTEL_INFO(dev)->gen >= 8) 878 if (INTEL_INFO(dev)->gen >= 8)
868 gen8_record_semaphore_state(dev_priv, error, ring, ering); 879 gen8_record_semaphore_state(dev_priv, error, ring, ering);
@@ -899,7 +910,7 @@ static void i915_record_ring_state(struct drm_device *dev,
899 ering->ctl = I915_READ_CTL(ring); 910 ering->ctl = I915_READ_CTL(ring);
900 911
901 if (I915_NEED_GFX_HWS(dev)) { 912 if (I915_NEED_GFX_HWS(dev)) {
902 int mmio; 913 i915_reg_t mmio;
903 914
904 if (IS_GEN7(dev)) { 915 if (IS_GEN7(dev)) {
905 switch (ring->id) { 916 switch (ring->id) {
@@ -1071,6 +1082,25 @@ static void i915_gem_record_rings(struct drm_device *dev,
1071 list_for_each_entry(request, &ring->request_list, list) { 1082 list_for_each_entry(request, &ring->request_list, list) {
1072 struct drm_i915_error_request *erq; 1083 struct drm_i915_error_request *erq;
1073 1084
1085 if (count >= error->ring[i].num_requests) {
1086 /*
1087 * If the ring request list was changed in
1088 * between the point where the error request
1089 * list was created and dimensioned and this
1090 * point then just exit early to avoid crashes.
1091 *
1092 * We don't need to communicate that the
1093 * request list changed state during error
1094 * state capture and that the error state is
1095 * slightly incorrect as a consequence since we
1096 * are typically only interested in the request
1097 * list state at the point of error state
1098 * capture, not in any changes happening during
1099 * the capture.
1100 */
1101 break;
1102 }
1103
1074 erq = &error->ring[i].requests[count++]; 1104 erq = &error->ring[i].requests[count++];
1075 erq->seqno = request->seqno; 1105 erq->seqno = request->seqno;
1076 erq->jiffies = request->emitted_jiffies; 1106 erq->jiffies = request->emitted_jiffies;
@@ -1181,7 +1211,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1181 if (IS_VALLEYVIEW(dev)) { 1211 if (IS_VALLEYVIEW(dev)) {
1182 error->gtier[0] = I915_READ(GTIER); 1212 error->gtier[0] = I915_READ(GTIER);
1183 error->ier = I915_READ(VLV_IER); 1213 error->ier = I915_READ(VLV_IER);
1184 error->forcewake = I915_READ(FORCEWAKE_VLV); 1214 error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
1185 } 1215 }
1186 1216
1187 if (IS_GEN7(dev)) 1217 if (IS_GEN7(dev))
@@ -1193,14 +1223,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1193 } 1223 }
1194 1224
1195 if (IS_GEN6(dev)) { 1225 if (IS_GEN6(dev)) {
1196 error->forcewake = I915_READ(FORCEWAKE); 1226 error->forcewake = I915_READ_FW(FORCEWAKE);
1197 error->gab_ctl = I915_READ(GAB_CTL); 1227 error->gab_ctl = I915_READ(GAB_CTL);
1198 error->gfx_mode = I915_READ(GFX_MODE); 1228 error->gfx_mode = I915_READ(GFX_MODE);
1199 } 1229 }
1200 1230
1201 /* 2: Registers which belong to multiple generations */ 1231 /* 2: Registers which belong to multiple generations */
1202 if (INTEL_INFO(dev)->gen >= 7) 1232 if (INTEL_INFO(dev)->gen >= 7)
1203 error->forcewake = I915_READ(FORCEWAKE_MT); 1233 error->forcewake = I915_READ_FW(FORCEWAKE_MT);
1204 1234
1205 if (INTEL_INFO(dev)->gen >= 6) { 1235 if (INTEL_INFO(dev)->gen >= 6) {
1206 error->derrmr = I915_READ(DERRMR); 1236 error->derrmr = I915_READ(DERRMR);
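
The request-capture hunk above fixes an overrun: the error state sizes its request array first, then walks the live ring list without holding it stable, so the list may have grown in between. The copy loop now stops at the pre-sized bound and deliberately accepts a slightly stale snapshot, as the in-code comment explains. The same bounded-snapshot shape in a tiny sketch:

#include <stdio.h>

#define MAX 4

/* The destination was sized earlier; the source "list" may have grown
 * since then, which is exactly the race the error-capture hunk guards. */
static int snapshot(const int *live, int live_count, int *out, int out_slots)
{
	int copied = 0;

	for (int i = 0; i < live_count; i++) {
		if (copied >= out_slots)
			break;		/* list grew after sizing: stop early */
		out[copied++] = live[i];
	}
	return copied;
}

int main(void)
{
	int live[6] = { 1, 2, 3, 4, 5, 6 };	/* grew from 4 to 6 entries */
	int out[MAX];

	printf("captured %d of %d\n", snapshot(live, 6, out, MAX), 6);
	return 0;
}
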
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index c4cb1c0c4d0d..685c7991e24f 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -26,7 +26,7 @@
26 26
27/* Definitions of GuC H/W registers, bits, etc */ 27/* Definitions of GuC H/W registers, bits, etc */
28 28
29#define GUC_STATUS 0xc000 29#define GUC_STATUS _MMIO(0xc000)
30#define GS_BOOTROM_SHIFT 1 30#define GS_BOOTROM_SHIFT 1
31#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) 31#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
32#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) 32#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
@@ -39,40 +39,41 @@
39#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) 39#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
40#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) 40#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
41 41
42#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) 42#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
43 43
44#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4) 44#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
45#define DMA_ADDR_0_LOW 0xc300 45#define UOS_RSA_SCRATCH_MAX_COUNT 64
46#define DMA_ADDR_0_HIGH 0xc304 46#define DMA_ADDR_0_LOW _MMIO(0xc300)
47#define DMA_ADDR_1_LOW 0xc308 47#define DMA_ADDR_0_HIGH _MMIO(0xc304)
48#define DMA_ADDR_1_HIGH 0xc30c 48#define DMA_ADDR_1_LOW _MMIO(0xc308)
49#define DMA_ADDR_1_HIGH _MMIO(0xc30c)
49#define DMA_ADDRESS_SPACE_WOPCM (7 << 16) 50#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
50#define DMA_ADDRESS_SPACE_GTT (8 << 16) 51#define DMA_ADDRESS_SPACE_GTT (8 << 16)
51#define DMA_COPY_SIZE 0xc310 52#define DMA_COPY_SIZE _MMIO(0xc310)
52#define DMA_CTRL 0xc314 53#define DMA_CTRL _MMIO(0xc314)
53#define UOS_MOVE (1<<4) 54#define UOS_MOVE (1<<4)
54#define START_DMA (1<<0) 55#define START_DMA (1<<0)
55#define DMA_GUC_WOPCM_OFFSET 0xc340 56#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
56#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ 57#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
57#define GUC_MAX_IDLE_COUNT 0xC3E4 58#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
58 59
59#define GUC_WOPCM_SIZE 0xc050 60#define GUC_WOPCM_SIZE _MMIO(0xc050)
60#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ 61#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
61 62
62/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ 63/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
63#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) 64#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
64 65
65#define GEN8_GT_PM_CONFIG 0x138140 66#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
66#define GEN9LP_GT_PM_CONFIG 0x138140 67#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
67#define GEN9_GT_PM_CONFIG 0x13816c 68#define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
68#define GT_DOORBELL_ENABLE (1<<0) 69#define GT_DOORBELL_ENABLE (1<<0)
69 70
70#define GEN8_GTCR 0x4274 71#define GEN8_GTCR _MMIO(0x4274)
71#define GEN8_GTCR_INVALIDATE (1<<0) 72#define GEN8_GTCR_INVALIDATE (1<<0)
72 73
73#define GUC_ARAT_C6DIS 0xA178 74#define GUC_ARAT_C6DIS _MMIO(0xA178)
74 75
75#define GUC_SHIM_CONTROL 0xc064 76#define GUC_SHIM_CONTROL _MMIO(0xc064)
76#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) 77#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
77#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) 78#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
78#define GUC_ENABLE_MIA_CACHING (1<<2) 79#define GUC_ENABLE_MIA_CACHING (1<<2)
@@ -89,21 +90,21 @@
89 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ 90 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
90 GUC_ENABLE_MIA_CLOCK_GATING) 91 GUC_ENABLE_MIA_CLOCK_GATING)
91 92
92#define HOST2GUC_INTERRUPT 0xc4c8 93#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
93#define HOST2GUC_TRIGGER (1<<0) 94#define HOST2GUC_TRIGGER (1<<0)
94 95
95#define DRBMISC1 0x1984 96#define DRBMISC1 0x1984
96#define DOORBELL_ENABLE (1<<0) 97#define DOORBELL_ENABLE (1<<0)
97 98
98#define GEN8_DRBREGL(x) (0x1000 + (x) * 8) 99#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
99#define GEN8_DRB_VALID (1<<0) 100#define GEN8_DRB_VALID (1<<0)
100#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4) 101#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
101 102
102#define DE_GUCRMR 0x44054 103#define DE_GUCRMR _MMIO(0x44054)
103 104
104#define GUC_BCS_RCS_IER 0xC550 105#define GUC_BCS_RCS_IER _MMIO(0xC550)
105#define GUC_VCS2_VCS1_IER 0xC554 106#define GUC_VCS2_VCS1_IER _MMIO(0xC554)
106#define GUC_WD_VECS_IER 0xC558 107#define GUC_WD_VECS_IER _MMIO(0xC558)
107#define GUC_PM_P24C_IER 0xC55C 108#define GUC_PM_P24C_IER _MMIO(0xC55C)
108 109
109#endif 110#endif
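
Every register in this header is rewritten from a bare offset to _MMIO(), wrapping the offset in the one-member struct i915_reg_t that the i915_reg.h hunk further down introduces. The payoff is type safety: doing arithmetic on a register, or passing a plain integer where a register handle is expected, stops compiling. A minimal sketch of the idiom; reg_t, MMIO() and the fake register file below are illustrative stand-ins for i915_reg_t, _MMIO() and real MMIO access:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t reg; } reg_t;		/* mirrors i915_reg_t */
#define MMIO(r) ((const reg_t){ .reg = (r) })

#define GUC_STATUS	MMIO(0xc000)
#define SOFT_SCRATCH(n)	MMIO(0xc180 + (n) * 4)

static uint32_t mmio_space[0x10000 / 4];	/* fake register file */

static uint32_t reg_read(reg_t r)           { return mmio_space[r.reg / 4]; }
static void     reg_write(reg_t r, uint32_t v) { mmio_space[r.reg / 4] = v; }

int main(void)
{
	reg_write(SOFT_SCRATCH(2), 0xdead);
	printf("%#x\n", reg_read(SOFT_SCRATCH(2)));
	/* reg_read(0xc000); would no longer compile: int is not reg_t */
	return 0;
}
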
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 036b42bae827..0d23785ba818 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -27,7 +27,7 @@
27#include "intel_guc.h" 27#include "intel_guc.h"
28 28
29/** 29/**
30 * DOC: GuC Client 30 * DOC: GuC-based command submission
31 * 31 *
32 * i915_guc_client: 32 * i915_guc_client:
33 * We use the term client to avoid confusion with contexts. A i915_guc_client is 33 * We use the term client to avoid confusion with contexts. A i915_guc_client is
@@ -86,7 +86,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
86 return -EINVAL; 86 return -EINVAL;
87 87
88 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 88 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
89 spin_lock(&dev_priv->guc.host2guc_lock);
90 89
91 dev_priv->guc.action_count += 1; 90 dev_priv->guc.action_count += 1;
92 dev_priv->guc.action_cmd = data[0]; 91 dev_priv->guc.action_cmd = data[0];
@@ -119,7 +118,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
119 } 118 }
120 dev_priv->guc.action_status = status; 119 dev_priv->guc.action_status = status;
121 120
122 spin_unlock(&dev_priv->guc.host2guc_lock);
123 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 121 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
124 122
125 return ret; 123 return ret;
@@ -161,9 +159,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
161 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
162 /* WaRsDisableCoarsePowerGating:skl,bxt */ 160 /* WaRsDisableCoarsePowerGating:skl,bxt */
163 if (!intel_enable_rc6(dev_priv->dev) || 161 if (!intel_enable_rc6(dev_priv->dev) ||
164 (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || 162 IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
165 (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) || 163 (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
166 (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0))) 164 (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
167 data[1] = 0; 165 data[1] = 0;
168 else 166 else
169 /* bit 0 and 1 are for Render and Media domain separately */ 167 /* bit 0 and 1 are for Render and Media domain separately */
@@ -258,7 +256,7 @@ static void guc_disable_doorbell(struct intel_guc *guc,
258 struct drm_i915_private *dev_priv = guc_to_i915(guc); 256 struct drm_i915_private *dev_priv = guc_to_i915(guc);
259 struct guc_doorbell_info *doorbell; 257 struct guc_doorbell_info *doorbell;
260 void *base; 258 void *base;
261 int drbreg = GEN8_DRBREGL(client->doorbell_id); 259 i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
262 int value; 260 int value;
263 261
264 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); 262 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
@@ -292,16 +290,12 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
292 const uint32_t cacheline_size = cache_line_size(); 290 const uint32_t cacheline_size = cache_line_size();
293 uint32_t offset; 291 uint32_t offset;
294 292
295 spin_lock(&guc->host2guc_lock);
296
297 /* Doorbell uses a single cache line within a page */ 293 /* Doorbell uses a single cache line within a page */
298 offset = offset_in_page(guc->db_cacheline); 294 offset = offset_in_page(guc->db_cacheline);
299 295
300 /* Moving to next cache line to reduce contention */ 296 /* Moving to next cache line to reduce contention */
301 guc->db_cacheline += cacheline_size; 297 guc->db_cacheline += cacheline_size;
302 298
303 spin_unlock(&guc->host2guc_lock);
304
305 DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n", 299 DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
306 offset, guc->db_cacheline, cacheline_size); 300 offset, guc->db_cacheline, cacheline_size);
307 301
@@ -322,13 +316,11 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
322 const uint16_t end = start + half; 316 const uint16_t end = start + half;
323 uint16_t id; 317 uint16_t id;
324 318
325 spin_lock(&guc->host2guc_lock);
326 id = find_next_zero_bit(guc->doorbell_bitmap, end, start); 319 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
327 if (id == end) 320 if (id == end)
328 id = GUC_INVALID_DOORBELL_ID; 321 id = GUC_INVALID_DOORBELL_ID;
329 else 322 else
330 bitmap_set(guc->doorbell_bitmap, id, 1); 323 bitmap_set(guc->doorbell_bitmap, id, 1);
331 spin_unlock(&guc->host2guc_lock);
332 324
333 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", 325 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
334 hi_pri ? "high" : "normal", id); 326 hi_pri ? "high" : "normal", id);
@@ -338,9 +330,7 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
338 330
339static void release_doorbell(struct intel_guc *guc, uint16_t id) 331static void release_doorbell(struct intel_guc *guc, uint16_t id)
340{ 332{
341 spin_lock(&guc->host2guc_lock);
342 bitmap_clear(guc->doorbell_bitmap, id, 1); 333 bitmap_clear(guc->doorbell_bitmap, id, 1);
343 spin_unlock(&guc->host2guc_lock);
344} 334}
345 335
346/* 336/*
@@ -487,16 +477,13 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
487 struct guc_process_desc *desc; 477 struct guc_process_desc *desc;
488 void *base; 478 void *base;
489 u32 size = sizeof(struct guc_wq_item); 479 u32 size = sizeof(struct guc_wq_item);
490 int ret = 0, timeout_counter = 200; 480 int ret = -ETIMEDOUT, timeout_counter = 200;
491 481
492 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); 482 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
493 desc = base + gc->proc_desc_offset; 483 desc = base + gc->proc_desc_offset;
494 484
495 while (timeout_counter-- > 0) { 485 while (timeout_counter-- > 0) {
496 ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head, 486 if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
497 gc->wq_size) >= size, 1);
498
499 if (!ret) {
500 *offset = gc->wq_tail; 487 *offset = gc->wq_tail;
501 488
502 /* advance the tail for next workqueue item */ 489 /* advance the tail for next workqueue item */
@@ -505,7 +492,11 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
505 492
506 /* this will break the loop */ 493 /* this will break the loop */
507 timeout_counter = 0; 494 timeout_counter = 0;
495 ret = 0;
508 } 496 }
497
498 if (timeout_counter)
499 usleep_range(1000, 2000);
509 }; 500 };
510 501
511 kunmap_atomic(base); 502 kunmap_atomic(base);
@@ -588,8 +579,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
588/** 579/**
589 * i915_guc_submit() - Submit commands through GuC 580 * i915_guc_submit() - Submit commands through GuC
590 * @client: the guc client where commands will go through 581 * @client: the guc client where commands will go through
591 * @ctx: LRC where commands come from 582 * @rq: request associated with the commands
592 * @ring: HW engine that will excute the commands
593 * 583 *
594 * Return: 0 if succeed 584 * Return: 0 if succeed
595 */ 585 */
@@ -598,15 +588,12 @@ int i915_guc_submit(struct i915_guc_client *client,
598{ 588{
599 struct intel_guc *guc = client->guc; 589 struct intel_guc *guc = client->guc;
600 enum intel_ring_id ring_id = rq->ring->id; 590 enum intel_ring_id ring_id = rq->ring->id;
601 unsigned long flags;
602 int q_ret, b_ret; 591 int q_ret, b_ret;
603 592
604 /* Need this because of the deferred pin ctx and ring */ 593 /* Need this because of the deferred pin ctx and ring */
605 /* Shall we move this right after ring is pinned? */ 594 /* Shall we move this right after ring is pinned? */
606 lr_context_update(rq); 595 lr_context_update(rq);
607 596
608 spin_lock_irqsave(&client->wq_lock, flags);
609
610 q_ret = guc_add_workqueue_item(client, rq); 597 q_ret = guc_add_workqueue_item(client, rq);
611 if (q_ret == 0) 598 if (q_ret == 0)
612 b_ret = guc_ring_doorbell(client); 599 b_ret = guc_ring_doorbell(client);
@@ -621,12 +608,8 @@ int i915_guc_submit(struct i915_guc_client *client,
621 } else { 608 } else {
622 client->retcode = 0; 609 client->retcode = 0;
623 } 610 }
624 spin_unlock_irqrestore(&client->wq_lock, flags);
625
626 spin_lock(&guc->host2guc_lock);
627 guc->submissions[ring_id] += 1; 611 guc->submissions[ring_id] += 1;
628 guc->last_seqno[ring_id] = rq->seqno; 612 guc->last_seqno[ring_id] = rq->seqno;
629 spin_unlock(&guc->host2guc_lock);
630 613
631 return q_ret; 614 return q_ret;
632} 615}
@@ -678,7 +661,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
678/** 661/**
679 * gem_release_guc_obj() - Release gem object allocated for GuC usage 662 * gem_release_guc_obj() - Release gem object allocated for GuC usage
680 * @obj: gem obj to be released 663 * @obj: gem obj to be released
681 */ 664 */
682static void gem_release_guc_obj(struct drm_i915_gem_object *obj) 665static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
683{ 666{
684 if (!obj) 667 if (!obj)
@@ -731,7 +714,8 @@ static void guc_client_free(struct drm_device *dev,
731 * The kernel client to replace ExecList submission is created with 714 * The kernel client to replace ExecList submission is created with
732 * NORMAL priority. Priority of a client for scheduler can be HIGH, 715 * NORMAL priority. Priority of a client for scheduler can be HIGH,
733 * while a preemption context can use CRITICAL. 716 * while a preemption context can use CRITICAL.
734 * @ctx the context to own the client (we use the default render context) 717 * @ctx: the context that owns the client (we use the default render
718 * context)
735 * 719 *
736 * Return: An i915_guc_client object if success. 720 * Return: An i915_guc_client object if success.
737 */ 721 */
@@ -768,7 +752,6 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
768 client->client_obj = obj; 752 client->client_obj = obj;
769 client->wq_offset = GUC_DB_SIZE; 753 client->wq_offset = GUC_DB_SIZE;
770 client->wq_size = GUC_WQ_SIZE; 754 client->wq_size = GUC_WQ_SIZE;
771 spin_lock_init(&client->wq_lock);
772 755
773 client->doorbell_offset = select_doorbell_cacheline(guc); 756 client->doorbell_offset = select_doorbell_cacheline(guc);
774 757
@@ -871,8 +854,6 @@ int i915_guc_submission_init(struct drm_device *dev)
871 if (!guc->ctx_pool_obj) 854 if (!guc->ctx_pool_obj)
872 return -ENOMEM; 855 return -ENOMEM;
873 856
874 spin_lock_init(&dev_priv->guc.host2guc_lock);
875
876 ida_init(&guc->ctx_ids); 857 ida_init(&guc->ctx_ids);
877 858
878 guc_create_log(guc); 859 guc_create_log(guc);
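
Beyond deleting the now-redundant host2guc_lock and wq_lock, the interesting rework in this file is guc_get_workqueue_space(): it drops wait_for_atomic() in favour of an open-coded poll, pre-loads ret with -ETIMEDOUT so the failure case needs no extra branch, checks CIRC_SPACE() directly, and backs off with usleep_range() between attempts while retries remain. A userspace sketch of that loop shape, where space_available() stands in for the CIRC_SPACE() check:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int fake_space = 0;

static int space_available(void)
{
	return ++fake_space >= 3;	/* becomes true on the third poll */
}

static int wait_for_space(void)
{
	int ret = -ETIMEDOUT;		/* the result unless a poll succeeds */
	int tries = 200;

	while (tries-- > 0) {
		if (space_available()) {
			ret = 0;
			break;
		}
		if (tries)
			usleep(1000);	/* stand-in for usleep_range(1000, 2000) */
	}
	return ret;
}

int main(void)
{
	printf("wait_for_space() = %d\n", wait_for_space());
	return 0;
}
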
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0d228f909dcb..e88d692583a5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
139/* 139/*
140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141 */ 141 */
142static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) 142static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
143 i915_reg_t reg)
143{ 144{
144 u32 val = I915_READ(reg); 145 u32 val = I915_READ(reg);
145 146
@@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
147 return; 148 return;
148 149
149 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 150 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
150 reg, val); 151 i915_mmio_reg_offset(reg), val);
151 I915_WRITE(reg, 0xffffffff); 152 I915_WRITE(reg, 0xffffffff);
152 POSTING_READ(reg); 153 POSTING_READ(reg);
153 I915_WRITE(reg, 0xffffffff); 154 I915_WRITE(reg, 0xffffffff);
@@ -214,9 +215,9 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
214 * @interrupt_mask: mask of interrupt bits to update 215 * @interrupt_mask: mask of interrupt bits to update
215 * @enabled_irq_mask: mask of interrupt bits to enable 216 * @enabled_irq_mask: mask of interrupt bits to enable
216 */ 217 */
217static void ilk_update_display_irq(struct drm_i915_private *dev_priv, 218void ilk_update_display_irq(struct drm_i915_private *dev_priv,
218 uint32_t interrupt_mask, 219 uint32_t interrupt_mask,
219 uint32_t enabled_irq_mask) 220 uint32_t enabled_irq_mask)
220{ 221{
221 uint32_t new_val; 222 uint32_t new_val;
222 223
@@ -238,18 +239,6 @@ static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
238 } 239 }
239} 240}
240 241
241void
242ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
243{
244 ilk_update_display_irq(dev_priv, mask, mask);
245}
246
247void
248ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
249{
250 ilk_update_display_irq(dev_priv, mask, 0);
251}
252
253/** 242/**
254 * ilk_update_gt_irq - update GTIMR 243 * ilk_update_gt_irq - update GTIMR
255 * @dev_priv: driver private 244 * @dev_priv: driver private
@@ -283,27 +272,27 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
283 ilk_update_gt_irq(dev_priv, mask, 0); 272 ilk_update_gt_irq(dev_priv, mask, 0);
284} 273}
285 274
286static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) 275static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
287{ 276{
288 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 277 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
289} 278}
290 279
291static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) 280static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
292{ 281{
293 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 282 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
294} 283}
295 284
296static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) 285static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
297{ 286{
298 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; 287 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
299} 288}
300 289
301/** 290/**
302 * snb_update_pm_irq - update GEN6_PMIMR 291 * snb_update_pm_irq - update GEN6_PMIMR
303 * @dev_priv: driver private 292 * @dev_priv: driver private
304 * @interrupt_mask: mask of interrupt bits to update 293 * @interrupt_mask: mask of interrupt bits to update
305 * @enabled_irq_mask: mask of interrupt bits to enable 294 * @enabled_irq_mask: mask of interrupt bits to enable
306 */ 295 */
307static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 296static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
308 uint32_t interrupt_mask, 297 uint32_t interrupt_mask,
309 uint32_t enabled_irq_mask) 298 uint32_t enabled_irq_mask)
@@ -350,7 +339,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
350void gen6_reset_rps_interrupts(struct drm_device *dev) 339void gen6_reset_rps_interrupts(struct drm_device *dev)
351{ 340{
352 struct drm_i915_private *dev_priv = dev->dev_private; 341 struct drm_i915_private *dev_priv = dev->dev_private;
353 uint32_t reg = gen6_pm_iir(dev_priv); 342 i915_reg_t reg = gen6_pm_iir(dev_priv);
354 343
355 spin_lock_irq(&dev_priv->irq_lock); 344 spin_lock_irq(&dev_priv->irq_lock);
356 I915_WRITE(reg, dev_priv->pm_rps_events); 345 I915_WRITE(reg, dev_priv->pm_rps_events);
@@ -417,11 +406,11 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
417} 406}
418 407
419/** 408/**
420 * bdw_update_port_irq - update DE port interrupt 409 * bdw_update_port_irq - update DE port interrupt
421 * @dev_priv: driver private 410 * @dev_priv: driver private
422 * @interrupt_mask: mask of interrupt bits to update 411 * @interrupt_mask: mask of interrupt bits to update
423 * @enabled_irq_mask: mask of interrupt bits to enable 412 * @enabled_irq_mask: mask of interrupt bits to enable
424 */ 413 */
425static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 414static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
426 uint32_t interrupt_mask, 415 uint32_t interrupt_mask,
427 uint32_t enabled_irq_mask) 416 uint32_t enabled_irq_mask)
@@ -449,6 +438,38 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
449} 438}
450 439
451/** 440/**
441 * bdw_update_pipe_irq - update DE pipe interrupt
442 * @dev_priv: driver private
443 * @pipe: pipe whose interrupt to update
444 * @interrupt_mask: mask of interrupt bits to update
445 * @enabled_irq_mask: mask of interrupt bits to enable
446 */
447void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
448 enum pipe pipe,
449 uint32_t interrupt_mask,
450 uint32_t enabled_irq_mask)
451{
452 uint32_t new_val;
453
454 assert_spin_locked(&dev_priv->irq_lock);
455
456 WARN_ON(enabled_irq_mask & ~interrupt_mask);
457
458 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
459 return;
460
461 new_val = dev_priv->de_irq_mask[pipe];
462 new_val &= ~interrupt_mask;
463 new_val |= (~enabled_irq_mask & interrupt_mask);
464
465 if (new_val != dev_priv->de_irq_mask[pipe]) {
466 dev_priv->de_irq_mask[pipe] = new_val;
467 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
468 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
469 }
470}
471
472/**
452 * ibx_display_interrupt_update - update SDEIMR 473 * ibx_display_interrupt_update - update SDEIMR
453 * @dev_priv: driver private 474 * @dev_priv: driver private
454 * @interrupt_mask: mask of interrupt bits to update 475 * @interrupt_mask: mask of interrupt bits to update
@@ -477,7 +498,7 @@ static void
477__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 498__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
478 u32 enable_mask, u32 status_mask) 499 u32 enable_mask, u32 status_mask)
479{ 500{
480 u32 reg = PIPESTAT(pipe); 501 i915_reg_t reg = PIPESTAT(pipe);
481 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 502 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
482 503
483 assert_spin_locked(&dev_priv->irq_lock); 504 assert_spin_locked(&dev_priv->irq_lock);
@@ -504,7 +525,7 @@ static void
504__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 525__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
505 u32 enable_mask, u32 status_mask) 526 u32 enable_mask, u32 status_mask)
506{ 527{
507 u32 reg = PIPESTAT(pipe); 528 i915_reg_t reg = PIPESTAT(pipe);
508 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 529 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
509 530
510 assert_spin_locked(&dev_priv->irq_lock); 531 assert_spin_locked(&dev_priv->irq_lock);
@@ -665,8 +686,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
665static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 686static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
666{ 687{
667 struct drm_i915_private *dev_priv = dev->dev_private; 688 struct drm_i915_private *dev_priv = dev->dev_private;
668 unsigned long high_frame; 689 i915_reg_t high_frame, low_frame;
669 unsigned long low_frame;
670 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 690 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
671 struct intel_crtc *intel_crtc = 691 struct intel_crtc *intel_crtc =
672 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 692 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -717,9 +737,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
717 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 737 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
718} 738}
719 739
720/* raw reads, only for fast reads of display block, no need for forcewake etc. */ 740/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
721#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
722
723static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 741static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
724{ 742{
725 struct drm_device *dev = crtc->base.dev; 743 struct drm_device *dev = crtc->base.dev;
@@ -733,9 +751,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
733 vtotal /= 2; 751 vtotal /= 2;
734 752
735 if (IS_GEN2(dev)) 753 if (IS_GEN2(dev))
736 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 754 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
737 else 755 else
738 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 756 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
739 757
740 /* 758 /*
741 * On HSW, the DSL reg (0x70000) appears to return 0 if we 759 * On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -827,7 +845,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
827 * We can split this into vertical and horizontal 845 * We can split this into vertical and horizontal
828 * scanout position. 846 * scanout position.
829 */ 847 */
830 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 848 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
831 849
832 /* convert to pixel counts */ 850 /* convert to pixel counts */
833 vbl_start *= htotal; 851 vbl_start *= htotal;
@@ -1188,7 +1206,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1188 POSTING_READ(GEN7_MISCCPCTL); 1206 POSTING_READ(GEN7_MISCCPCTL);
1189 1207
1190 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1208 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1191 u32 reg; 1209 i915_reg_t reg;
1192 1210
1193 slice--; 1211 slice--;
1194 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1212 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
@@ -1196,7 +1214,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1196 1214
1197 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1215 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1198 1216
1199 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1217 reg = GEN7_L3CDERRST1(slice);
1200 1218
1201 error_status = I915_READ(reg); 1219 error_status = I915_READ(reg);
1202 row = GEN7_PARITY_ERROR_ROW(error_status); 1220 row = GEN7_PARITY_ERROR_ROW(error_status);
@@ -1290,70 +1308,69 @@ static void snb_gt_irq_handler(struct drm_device *dev,
1290 ivybridge_parity_error_irq_handler(dev, gt_iir); 1308 ivybridge_parity_error_irq_handler(dev, gt_iir);
1291} 1309}
1292 1310
1311static __always_inline void
1312gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
1313{
1314 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1315 notify_ring(ring);
1316 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1317 intel_lrc_irq_handler(ring);
1318}
1319
1293static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1320static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1294 u32 master_ctl) 1321 u32 master_ctl)
1295{ 1322{
1296 irqreturn_t ret = IRQ_NONE; 1323 irqreturn_t ret = IRQ_NONE;
1297 1324
1298 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1325 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1299 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); 1326 u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
1300 if (tmp) { 1327 if (iir) {
1301 I915_WRITE_FW(GEN8_GT_IIR(0), tmp); 1328 I915_WRITE_FW(GEN8_GT_IIR(0), iir);
1302 ret = IRQ_HANDLED; 1329 ret = IRQ_HANDLED;
1303 1330
1304 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) 1331 gen8_cs_irq_handler(&dev_priv->ring[RCS],
1305 intel_lrc_irq_handler(&dev_priv->ring[RCS]); 1332 iir, GEN8_RCS_IRQ_SHIFT);
1306 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1307 notify_ring(&dev_priv->ring[RCS]);
1308 1333
1309 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) 1334 gen8_cs_irq_handler(&dev_priv->ring[BCS],
1310 intel_lrc_irq_handler(&dev_priv->ring[BCS]); 1335 iir, GEN8_BCS_IRQ_SHIFT);
1311 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1312 notify_ring(&dev_priv->ring[BCS]);
1313 } else 1336 } else
1314 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1337 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1315 } 1338 }
1316 1339
1317 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1340 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1318 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); 1341 u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
1319 if (tmp) { 1342 if (iir) {
1320 I915_WRITE_FW(GEN8_GT_IIR(1), tmp); 1343 I915_WRITE_FW(GEN8_GT_IIR(1), iir);
1321 ret = IRQ_HANDLED; 1344 ret = IRQ_HANDLED;
1322 1345
1323 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) 1346 gen8_cs_irq_handler(&dev_priv->ring[VCS],
1324 intel_lrc_irq_handler(&dev_priv->ring[VCS]); 1347 iir, GEN8_VCS1_IRQ_SHIFT);
1325 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1326 notify_ring(&dev_priv->ring[VCS]);
1327 1348
1328 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) 1349 gen8_cs_irq_handler(&dev_priv->ring[VCS2],
1329 intel_lrc_irq_handler(&dev_priv->ring[VCS2]); 1350 iir, GEN8_VCS2_IRQ_SHIFT);
1330 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1331 notify_ring(&dev_priv->ring[VCS2]);
1332 } else 1351 } else
1333 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1352 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1334 } 1353 }
1335 1354
1336 if (master_ctl & GEN8_GT_VECS_IRQ) { 1355 if (master_ctl & GEN8_GT_VECS_IRQ) {
1337 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); 1356 u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
1338 if (tmp) { 1357 if (iir) {
1339 I915_WRITE_FW(GEN8_GT_IIR(3), tmp); 1358 I915_WRITE_FW(GEN8_GT_IIR(3), iir);
1340 ret = IRQ_HANDLED; 1359 ret = IRQ_HANDLED;
1341 1360
1342 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) 1361 gen8_cs_irq_handler(&dev_priv->ring[VECS],
1343 intel_lrc_irq_handler(&dev_priv->ring[VECS]); 1362 iir, GEN8_VECS_IRQ_SHIFT);
1344 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1345 notify_ring(&dev_priv->ring[VECS]);
1346 } else 1363 } else
1347 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1364 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1348 } 1365 }
1349 1366
1350 if (master_ctl & GEN8_GT_PM_IRQ) { 1367 if (master_ctl & GEN8_GT_PM_IRQ) {
1351 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); 1368 u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
1352 if (tmp & dev_priv->pm_rps_events) { 1369 if (iir & dev_priv->pm_rps_events) {
1353 I915_WRITE_FW(GEN8_GT_IIR(2), 1370 I915_WRITE_FW(GEN8_GT_IIR(2),
1354 tmp & dev_priv->pm_rps_events); 1371 iir & dev_priv->pm_rps_events);
1355 ret = IRQ_HANDLED; 1372 ret = IRQ_HANDLED;
1356 gen6_rps_irq_handler(dev_priv, tmp); 1373 gen6_rps_irq_handler(dev_priv, iir);
1357 } else 1374 } else
1358 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1375 DRM_ERROR("The master control interrupt lied (PM)!\n");
1359 } 1376 }
@@ -1625,7 +1642,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1625 1642
1626 spin_lock(&dev_priv->irq_lock); 1643 spin_lock(&dev_priv->irq_lock);
1627 for_each_pipe(dev_priv, pipe) { 1644 for_each_pipe(dev_priv, pipe) {
1628 int reg; 1645 i915_reg_t reg;
1629 u32 mask, iir_bit = 0; 1646 u32 mask, iir_bit = 0;
1630 1647
1631 /* 1648 /*
@@ -1827,8 +1844,24 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1827 struct drm_i915_private *dev_priv = to_i915(dev); 1844 struct drm_i915_private *dev_priv = to_i915(dev);
1828 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1845 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1829 1846
1847 /*
1848 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1849 * unless we touch the hotplug register, even if hotplug_trigger is
1850 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1851 * errors.
1852 */
1830 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1853 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1854 if (!hotplug_trigger) {
1855 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1856 PORTD_HOTPLUG_STATUS_MASK |
1857 PORTC_HOTPLUG_STATUS_MASK |
1858 PORTB_HOTPLUG_STATUS_MASK;
1859 dig_hotplug_reg &= ~mask;
1860 }
1861
1831 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1862 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1863 if (!hotplug_trigger)
1864 return;
1832 1865
1833 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1866 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1834 dig_hotplug_reg, hpd, 1867 dig_hotplug_reg, hpd,
@@ -1843,8 +1876,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1843 int pipe; 1876 int pipe;
1844 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1877 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1845 1878
1846 if (hotplug_trigger) 1879 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1847 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1848 1880
1849 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1881 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1850 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1882 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1937,8 +1969,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1937 int pipe; 1969 int pipe;
1938 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1970 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1939 1971
1940 if (hotplug_trigger) 1972 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1941 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1942 1973
1943 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1974 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1975 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2644,7 +2675,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2644 DE_PIPE_VBLANK(pipe); 2675 DE_PIPE_VBLANK(pipe);
2645 2676
2646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2677 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2647 ironlake_enable_display_irq(dev_priv, bit); 2678 ilk_enable_display_irq(dev_priv, bit);
2648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2679 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649 2680
2650 return 0; 2681 return 0;
@@ -2669,10 +2700,9 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2669 unsigned long irqflags; 2700 unsigned long irqflags;
2670 2701
2671 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2702 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2672 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2703 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2673 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2674 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2675 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2704 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2705
2676 return 0; 2706 return 0;
2677} 2707}
2678 2708
@@ -2699,7 +2729,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2699 DE_PIPE_VBLANK(pipe); 2729 DE_PIPE_VBLANK(pipe);
2700 2730
2701 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2731 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2702 ironlake_disable_display_irq(dev_priv, bit); 2732 ilk_disable_display_irq(dev_priv, bit);
2703 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2733 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2704} 2734}
2705 2735
@@ -2720,9 +2750,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2720 unsigned long irqflags; 2750 unsigned long irqflags;
2721 2751
2722 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2752 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2723 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2753 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2724 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2725 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2726 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2754 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2727} 2755}
2728 2756
@@ -3451,7 +3479,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3451 * setup is guaranteed to run in single-threaded context. But we 3479 * setup is guaranteed to run in single-threaded context. But we
3452 * need it to make the assert_spin_locked happy. */ 3480 * need it to make the assert_spin_locked happy. */
3453 spin_lock_irq(&dev_priv->irq_lock); 3481 spin_lock_irq(&dev_priv->irq_lock);
3454 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3482 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3455 spin_unlock_irq(&dev_priv->irq_lock); 3483 spin_unlock_irq(&dev_priv->irq_lock);
3456 } 3484 }
3457 3485
@@ -3869,7 +3897,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3869 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3897 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3870 3898
3871 for_each_pipe(dev_priv, pipe) { 3899 for_each_pipe(dev_priv, pipe) {
3872 int reg = PIPESTAT(pipe); 3900 i915_reg_t reg = PIPESTAT(pipe);
3873 pipe_stats[pipe] = I915_READ(reg); 3901 pipe_stats[pipe] = I915_READ(reg);
3874 3902
3875 /* 3903 /*
@@ -4050,7 +4078,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4050 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4078 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4051 4079
4052 for_each_pipe(dev_priv, pipe) { 4080 for_each_pipe(dev_priv, pipe) {
4053 int reg = PIPESTAT(pipe); 4081 i915_reg_t reg = PIPESTAT(pipe);
4054 pipe_stats[pipe] = I915_READ(reg); 4082 pipe_stats[pipe] = I915_READ(reg);
4055 4083
4056 /* Clear the PIPE*STAT regs before the IIR */ 4084 /* Clear the PIPE*STAT regs before the IIR */
@@ -4272,7 +4300,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4272 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4300 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4273 4301
4274 for_each_pipe(dev_priv, pipe) { 4302 for_each_pipe(dev_priv, pipe) {
4275 int reg = PIPESTAT(pipe); 4303 i915_reg_t reg = PIPESTAT(pipe);
4276 pipe_stats[pipe] = I915_READ(reg); 4304 pipe_stats[pipe] = I915_READ(reg);
4277 4305
4278 /* 4306 /*
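
Besides the i915_reg_t conversions and the gen8_cs_irq_handler() deduplication, the structural addition in this file is bdw_update_pipe_irq(), which centralises the GEN8_DE_PIPE_IMR read-modify-write that gen8_enable_vblank()/gen8_disable_vblank() used to open-code. The rule: clear every bit named in interrupt_mask, then re-set the subset that should stay masked (updated but not enabled), and skip the MMIO write when the value is unchanged. The mask arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t imr_shadow = 0xffffffff;	/* all interrupts masked */

/* A bit in update_mask ends up unmasked (0) only if it is also set in
 * enabled_mask; bits outside update_mask are preserved untouched. */
static void update_imr(uint32_t update_mask, uint32_t enabled_mask)
{
	uint32_t new_val = imr_shadow;

	new_val &= ~update_mask;
	new_val |= ~enabled_mask & update_mask;

	if (new_val != imr_shadow) {	/* elide the write if unchanged */
		imr_shadow = new_val;
		printf("IMR <- %#010x\n", new_val);
	}
}

int main(void)
{
	update_imr(0x1, 0x1);	/* enable (unmask) bit 0 */
	update_imr(0x1, 0x0);	/* disable it again */
	update_imr(0x1, 0x0);	/* no change, so no register write */
	return 0;
}
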
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 96bb23865eac..835d6099c769 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -32,6 +32,7 @@ struct i915_params i915 __read_mostly = {
32 .panel_use_ssc = -1, 32 .panel_use_ssc = -1,
33 .vbt_sdvo_panel_type = -1, 33 .vbt_sdvo_panel_type = -1,
34 .enable_rc6 = -1, 34 .enable_rc6 = -1,
35 .enable_dc = -1,
35 .enable_fbc = -1, 36 .enable_fbc = -1,
36 .enable_execlists = -1, 37 .enable_execlists = -1,
37 .enable_hangcheck = true, 38 .enable_hangcheck = true,
@@ -40,6 +41,7 @@ struct i915_params i915 __read_mostly = {
40 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
41 .disable_power_well = -1, 42 .disable_power_well = -1,
42 .enable_ips = 1, 43 .enable_ips = 1,
44 .fastboot = 0,
43 .prefault_disable = 0, 45 .prefault_disable = 0,
44 .load_detect_test = 0, 46 .load_detect_test = 0,
45 .reset = true, 47 .reset = true,
@@ -79,6 +81,11 @@ MODULE_PARM_DESC(enable_rc6,
79 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " 81 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
80 "default: -1 (use per-chip default)"); 82 "default: -1 (use per-chip default)");
81 83
84module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
85MODULE_PARM_DESC(enable_dc,
86 "Enable power-saving display C-states. "
87 "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
88
82module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); 89module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
83MODULE_PARM_DESC(enable_fbc, 90MODULE_PARM_DESC(enable_fbc,
84 "Enable frame buffer compression for power savings " 91 "Enable frame buffer compression for power savings "
@@ -111,7 +118,7 @@ MODULE_PARM_DESC(enable_hangcheck,
111module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); 118module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
112MODULE_PARM_DESC(enable_ppgtt, 119MODULE_PARM_DESC(enable_ppgtt,
113 "Override PPGTT usage. " 120 "Override PPGTT usage. "
114 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 121 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
115 122
116module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); 123module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
117MODULE_PARM_DESC(enable_execlists, 124MODULE_PARM_DESC(enable_execlists,
@@ -125,7 +132,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i
125MODULE_PARM_DESC(preliminary_hw_support, 132MODULE_PARM_DESC(preliminary_hw_support,
126 "Enable preliminary hardware support."); 133 "Enable preliminary hardware support.");
127 134
128module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600); 135module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
129MODULE_PARM_DESC(disable_power_well, 136MODULE_PARM_DESC(disable_power_well,
130 "Disable display power wells when possible " 137 "Disable display power wells when possible "
131 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); 138 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
@@ -133,6 +140,10 @@ MODULE_PARM_DESC(disable_power_well,
133module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); 140module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
134MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); 141MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
135 142
143module_param_named(fastboot, i915.fastboot, bool, 0600);
144MODULE_PARM_DESC(fastboot,
145 "Try to skip unnecessary mode sets at boot time (default: false)");
146
136module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); 147module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
137MODULE_PARM_DESC(prefault_disable, 148MODULE_PARM_DESC(prefault_disable,
138 "Disable page prefaulting for pread/pwrite/reloc (default:false). " 149 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bc7b8faba84d..206b213a74e1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,14 +25,43 @@
25#ifndef _I915_REG_H_ 25#ifndef _I915_REG_H_
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28typedef struct {
29 uint32_t reg;
30} i915_reg_t;
31
32#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
33
34#define INVALID_MMIO_REG _MMIO(0)
35
36static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
37{
38 return reg.reg;
39}
40
41static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
42{
43 return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
44}
45
46static inline bool i915_mmio_reg_valid(i915_reg_t reg)
47{
48 return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
49}
50
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 51#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
52#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
29#define _PLANE(plane, a, b) _PIPE(plane, a, b) 53#define _PLANE(plane, a, b) _PIPE(plane, a, b)
30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 54#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
55#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
56#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 57#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
58#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ 59#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
33 (pipe) == PIPE_B ? (b) : (c)) 60 (pipe) == PIPE_B ? (b) : (c))
61#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
34#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ 62#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
35 (port) == PORT_B ? (b) : (c)) 63 (port) == PORT_B ? (b) : (c))
64#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
36 65
37#define _MASKED_FIELD(mask, value) ({ \ 66#define _MASKED_FIELD(mask, value) ({ \
38 if (__builtin_constant_p(mask)) \ 67 if (__builtin_constant_p(mask)) \
@@ -105,14 +134,14 @@
105#define GRDOM_RESET_STATUS (1<<1) 134#define GRDOM_RESET_STATUS (1<<1)
106#define GRDOM_RESET_ENABLE (1<<0) 135#define GRDOM_RESET_ENABLE (1<<0)
107 136
108#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4) 137#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
109#define ILK_GRDOM_FULL (0<<1) 138#define ILK_GRDOM_FULL (0<<1)
110#define ILK_GRDOM_RENDER (1<<1) 139#define ILK_GRDOM_RENDER (1<<1)
111#define ILK_GRDOM_MEDIA (3<<1) 140#define ILK_GRDOM_MEDIA (3<<1)
112#define ILK_GRDOM_MASK (3<<1) 141#define ILK_GRDOM_MASK (3<<1)
113#define ILK_GRDOM_RESET_ENABLE (1<<0) 142#define ILK_GRDOM_RESET_ENABLE (1<<0)
114 143
115#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ 144#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
116#define GEN6_MBC_SNPCR_SHIFT 21 145#define GEN6_MBC_SNPCR_SHIFT 21
117#define GEN6_MBC_SNPCR_MASK (3<<21) 146#define GEN6_MBC_SNPCR_MASK (3<<21)
118#define GEN6_MBC_SNPCR_MAX (0<<21) 147#define GEN6_MBC_SNPCR_MAX (0<<21)
@@ -120,31 +149,31 @@
120#define GEN6_MBC_SNPCR_LOW (2<<21) 149#define GEN6_MBC_SNPCR_LOW (2<<21)
121#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ 150#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
122 151
123#define VLV_G3DCTL 0x9024 152#define VLV_G3DCTL _MMIO(0x9024)
124#define VLV_GSCKGCTL 0x9028 153#define VLV_GSCKGCTL _MMIO(0x9028)
125 154
126#define GEN6_MBCTL 0x0907c 155#define GEN6_MBCTL _MMIO(0x0907c)
127#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) 156#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
128#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) 157#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
129#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) 158#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
130#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) 159#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
131#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) 160#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
132 161
133#define GEN6_GDRST 0x941c 162#define GEN6_GDRST _MMIO(0x941c)
134#define GEN6_GRDOM_FULL (1 << 0) 163#define GEN6_GRDOM_FULL (1 << 0)
135#define GEN6_GRDOM_RENDER (1 << 1) 164#define GEN6_GRDOM_RENDER (1 << 1)
136#define GEN6_GRDOM_MEDIA (1 << 2) 165#define GEN6_GRDOM_MEDIA (1 << 2)
137#define GEN6_GRDOM_BLT (1 << 3) 166#define GEN6_GRDOM_BLT (1 << 3)
138 167
139#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) 168#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
140#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) 169#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
141#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) 170#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220)
142#define PP_DIR_DCLV_2G 0xffffffff 171#define PP_DIR_DCLV_2G 0xffffffff
143 172
144#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) 173#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
145#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) 174#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8)
146 175
147#define GEN8_R_PWR_CLK_STATE 0x20C8 176#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
148#define GEN8_RPCS_ENABLE (1 << 31) 177#define GEN8_RPCS_ENABLE (1 << 31)
149#define GEN8_RPCS_S_CNT_ENABLE (1 << 18) 178#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
150#define GEN8_RPCS_S_CNT_SHIFT 15 179#define GEN8_RPCS_S_CNT_SHIFT 15
@@ -157,7 +186,7 @@
157#define GEN8_RPCS_EU_MIN_SHIFT 0 186#define GEN8_RPCS_EU_MIN_SHIFT 0
158#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT) 187#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
159 188
160#define GAM_ECOCHK 0x4090 189#define GAM_ECOCHK _MMIO(0x4090)
161#define BDW_DISABLE_HDC_INVALIDATION (1<<25) 190#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
162#define ECOCHK_SNB_BIT (1<<10) 191#define ECOCHK_SNB_BIT (1<<10)
163#define ECOCHK_DIS_TLB (1<<8) 192#define ECOCHK_DIS_TLB (1<<8)
@@ -170,15 +199,15 @@
 #define ECOCHK_PPGTT_WT_HSW (0x2<<3)
 #define ECOCHK_PPGTT_WB_HSW (0x3<<3)

-#define GAC_ECO_BITS 0x14090
+#define GAC_ECO_BITS _MMIO(0x14090)
 #define ECOBITS_SNB_BIT (1<<13)
 #define ECOBITS_PPGTT_CACHE64B (3<<8)
 #define ECOBITS_PPGTT_CACHE4B (0<<8)

-#define GAB_CTL 0x24000
+#define GAB_CTL _MMIO(0x24000)
 #define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)

-#define GEN6_STOLEN_RESERVED 0x1082C0
+#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
 #define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
 #define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
 #define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
@@ -200,6 +229,7 @@
 #define VGA_ST01_MDA 0x3ba
 #define VGA_ST01_CGA 0x3da

+#define _VGA_MSR_WRITE _MMIO(0x3c2)
 #define VGA_MSR_WRITE 0x3c2
 #define VGA_MSR_READ 0x3cc
 #define VGA_MSR_MEM_EN (1<<1)
@@ -377,10 +407,12 @@
 #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
 #define MI_BATCH_RESOURCE_STREAMER (1<<10)

-#define MI_PREDICATE_SRC0 (0x2400)
-#define MI_PREDICATE_SRC1 (0x2408)
+#define MI_PREDICATE_SRC0 _MMIO(0x2400)
+#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
+#define MI_PREDICATE_SRC1 _MMIO(0x2408)
+#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)

-#define MI_PREDICATE_RESULT_2 (0x2214)
+#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
 #define LOWER_SLICE_ENABLED (1<<0)
 #define LOWER_SLICE_DISABLED (0<<0)

@@ -509,49 +541,61 @@
 /*
  * Registers used only by the command parser
  */
-#define BCS_SWCTRL 0x22200
+#define BCS_SWCTRL _MMIO(0x22200)

-#define GPGPU_THREADS_DISPATCHED 0x2290
-#define HS_INVOCATION_COUNT 0x2300
-#define DS_INVOCATION_COUNT 0x2308
-#define IA_VERTICES_COUNT 0x2310
-#define IA_PRIMITIVES_COUNT 0x2318
-#define VS_INVOCATION_COUNT 0x2320
-#define GS_INVOCATION_COUNT 0x2328
-#define GS_PRIMITIVES_COUNT 0x2330
-#define CL_INVOCATION_COUNT 0x2338
-#define CL_PRIMITIVES_COUNT 0x2340
-#define PS_INVOCATION_COUNT 0x2348
-#define PS_DEPTH_COUNT 0x2350
+#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
+#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
+#define HS_INVOCATION_COUNT _MMIO(0x2300)
+#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
+#define DS_INVOCATION_COUNT _MMIO(0x2308)
+#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
+#define IA_VERTICES_COUNT _MMIO(0x2310)
+#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
+#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
+#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
+#define VS_INVOCATION_COUNT _MMIO(0x2320)
+#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
+#define GS_INVOCATION_COUNT _MMIO(0x2328)
+#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
+#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
+#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
+#define CL_INVOCATION_COUNT _MMIO(0x2338)
+#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
+#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
+#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
+#define PS_INVOCATION_COUNT _MMIO(0x2348)
+#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
+#define PS_DEPTH_COUNT _MMIO(0x2350)
+#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)

 /* There are 4 64-bit counter registers, one for each stream output */
-#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
+#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)

-#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
+#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
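Each counter above is 64 bits wide but is read through two 32-bit MMIO accesses — hence the new _UDW (upper dword) companion at offset +4 for every low register. A sketch of the usual two-half read, with a retry in case the low half wraps between the two reads; the helper name is illustrative, while I915_READ is the driver's existing 32-bit read:

/* Illustrative: assemble a 64-bit counter from lo/hi 32-bit halves,
 * re-reading if the high half changed mid-way (low half wrapped). */
static u64 read_counter_2x32(struct drm_i915_private *dev_priv,
			     i915_reg_t lo, i915_reg_t hi)
{
	u32 upper, lower, old_upper;

	upper = I915_READ(hi);
	do {
		old_upper = upper;
		lower = I915_READ(lo);
		upper = I915_READ(hi);
	} while (upper != old_upper);

	return (u64)upper << 32 | lower;
}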

-#define GEN7_3DPRIM_END_OFFSET 0x2420
-#define GEN7_3DPRIM_START_VERTEX 0x2430
-#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
-#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
-#define GEN7_3DPRIM_START_INSTANCE 0x243C
-#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
+#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
+#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
+#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
+#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C)
+#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)

-#define GEN7_GPGPU_DISPATCHDIMX 0x2500
-#define GEN7_GPGPU_DISPATCHDIMY 0x2504
-#define GEN7_GPGPU_DISPATCHDIMZ 0x2508
+#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
+#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
+#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)

-#define OACONTROL 0x2360
+#define OACONTROL _MMIO(0x2360)

 #define _GEN7_PIPEA_DE_LOAD_SL 0x70068
 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068
-#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
-		_GEN7_PIPEA_DE_LOAD_SL, \
-		_GEN7_PIPEB_DE_LOAD_SL)
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)

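GEN7_PIPE_DE_LOAD_SL above also switches from _PIPE to _MMIO_PIPE. For regularly spaced per-pipe instances the lookup is plain stride arithmetic; a sketch of the likely shape, assumed here for illustration:

/* Sketch: instance B's offset minus instance A's is the per-pipe stride,
 * so the pipe index scales it; _MMIO_PIPE just wraps the result. */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _MMIO_PIPE(pipe, a, b)	_MMIO(_PIPE(pipe, a, b))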
 /*
  * Reset registers
  */
-#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_I830 _MMIO(0x6070)
 #define DEBUG_RESET_FULL (1<<7)
 #define DEBUG_RESET_RENDER (1<<8)
 #define DEBUG_RESET_DISPLAY (1<<9)
@@ -559,7 +603,7 @@
 /*
  * IOSF sideband
  */
-#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
+#define VLV_IOSF_DOORBELL_REQ _MMIO(VLV_DISPLAY_BASE + 0x2100)
 #define IOSF_DEVFN_SHIFT 24
 #define IOSF_OPCODE_SHIFT 16
 #define IOSF_PORT_SHIFT 8
@@ -576,8 +620,8 @@
 #define IOSF_PORT_CCU 0xA9
 #define IOSF_PORT_GPS_CORE 0x48
 #define IOSF_PORT_FLISDSI 0x1B
-#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
-#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
+#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
+#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)

 /* See configdb bunit SB addr map */
 #define BUNIT_REG_BISOC 0x11
@@ -609,6 +653,7 @@

 /* See the PUNIT HAS v0.8 for the below bits */
 enum punit_power_well {
+	/* These numbers are fixed and must match the position of the pw bits */
 	PUNIT_POWER_WELL_RENDER = 0,
 	PUNIT_POWER_WELL_MEDIA = 1,
 	PUNIT_POWER_WELL_DISP2D = 3,
@@ -621,10 +666,12 @@ enum punit_power_well {
 	PUNIT_POWER_WELL_DPIO_RX1 = 11,
 	PUNIT_POWER_WELL_DPIO_CMN_D = 12,

-	PUNIT_POWER_WELL_NUM,
+	/* Not actual bit groups. Used as IDs for lookup_power_well() */
+	PUNIT_POWER_WELL_ALWAYS_ON,
 };
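The comments added in this hunk pin down why the enum values matter: each VLV/CHV well is a two-bit field in the punit power-gate word, so the enum value converts directly into a shift. A sketch of that encoding — the macro names here are assumptions, not shown in this hunk:

/* Illustrative: well 'pw' occupies bits [2*pw+1:2*pw] of the punit word. */
#define PUNIT_PWRGT_MASK(pw)		(3 << ((pw) * 2))
#define PUNIT_PWRGT_PWR_ON(pw)		(0 << ((pw) * 2))
#define PUNIT_PWRGT_PWR_GATE(pw)	(1 << ((pw) * 2))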

 enum skl_disp_power_wells {
+	/* These numbers are fixed and must match the position of the pw bits */
 	SKL_DISP_PW_MISC_IO,
 	SKL_DISP_PW_DDI_A_E,
 	SKL_DISP_PW_DDI_B,
@@ -632,6 +679,10 @@ enum skl_disp_power_wells {
 	SKL_DISP_PW_DDI_D,
 	SKL_DISP_PW_1 = 14,
 	SKL_DISP_PW_2,
+
+	/* Not actual bit groups. Used as IDs for lookup_power_well() */
+	SKL_DISP_PW_ALWAYS_ON,
+	SKL_DISP_PW_DC_OFF,
 };

 #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
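SKL_POWER_WELL_STATE picks the status bit out of the same two-bits-per-well layout. A sketch of how an enabled-check can use it — the HSW_PWR_WELL_DRIVER register and a SKL_POWER_WELL_REQ request mask are assumptions here, not part of this hunk:

/* Illustrative: request the well on, then poll its status bit. */
static void skl_power_well_on(struct drm_i915_private *dev_priv, int pw)
{
	u32 tmp = I915_READ(HSW_PWR_WELL_DRIVER);

	I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | SKL_POWER_WELL_REQ(pw));
	while (!(I915_READ(HSW_PWR_WELL_DRIVER) & SKL_POWER_WELL_STATE(pw)))
		cpu_relax(); /* sketch only: real code would bound the wait */
}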
@@ -832,7 +883,7 @@ enum skl_disp_power_wells {
  */
 #define DPIO_DEVFN 0

-#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
 #define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
 #define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
 #define DPIO_SFR_BYPASS (1<<1)
@@ -1185,9 +1236,9 @@ enum skl_disp_power_wells {
 #define DPIO_UPAR_SHIFT 30

 /* BXT PHY registers */
-#define _BXT_PHY(phy, a, b) _PIPE((phy), (a), (b))
+#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))

-#define BXT_P_CR_GT_DISP_PWRON 0x138090
+#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
 #define GT_DISPLAY_POWER_ON(phy) (1 << (phy))

 #define _PHY_CTL_FAMILY_EDP 0x64C80
@@ -1203,7 +1254,7 @@ enum skl_disp_power_wells {
 #define PORT_PLL_ENABLE (1 << 31)
 #define PORT_PLL_LOCK (1 << 30)
 #define PORT_PLL_REF_SEL (1 << 27)
-#define BXT_PORT_PLL_ENABLE(port) _PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)

 #define _PORT_PLL_EBB_0_A 0x162034
 #define _PORT_PLL_EBB_0_B 0x6C034
@@ -1214,7 +1265,7 @@ enum skl_disp_power_wells {
 #define PORT_PLL_P2_SHIFT 8
 #define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
 #define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
-#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
+#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
 		_PORT_PLL_EBB_0_B, \
 		_PORT_PLL_EBB_0_C)

@@ -1223,7 +1274,7 @@ enum skl_disp_power_wells {
 #define _PORT_PLL_EBB_4_C 0x6C344
 #define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
 #define PORT_PLL_RECALIBRATE (1 << 14)
-#define BXT_PORT_PLL_EBB_4(port) _PORT3(port, _PORT_PLL_EBB_4_A, \
+#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
 		_PORT_PLL_EBB_4_B, \
 		_PORT_PLL_EBB_4_C)

@@ -1259,7 +1310,7 @@ enum skl_disp_power_wells {
 #define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
 		_PORT_PLL_0_B, \
 		_PORT_PLL_0_C)
-#define BXT_PORT_PLL(port, idx) (_PORT_PLL_BASE(port) + (idx) * 4)
+#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)

 /* BXT PHY common lane registers */
 #define _PORT_CL1CM_DW0_A 0x162000
@@ -1297,7 +1348,7 @@ enum skl_disp_power_wells {
 		_PORT_CL1CM_DW30_A)

 /* Defined for PHY0 only */
-#define BXT_PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
 #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)

 /* BXT PHY Ref registers */
@@ -1337,10 +1388,10 @@ enum skl_disp_power_wells {
 #define _PORT_PCS_DW10_GRP_A 0x162C28
 #define _PORT_PCS_DW10_GRP_B 0x6CC28
 #define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(port) _PORT3(port, _PORT_PCS_DW10_LN01_A, \
+#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
 		_PORT_PCS_DW10_LN01_B, \
 		_PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(port) _PORT3(port, _PORT_PCS_DW10_GRP_A, \
+#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
 		_PORT_PCS_DW10_GRP_B, \
 		_PORT_PCS_DW10_GRP_C)
 #define TX2_SWING_CALC_INIT (1 << 31)
@@ -1357,13 +1408,13 @@ enum skl_disp_power_wells {
 #define _PORT_PCS_DW12_GRP_C 0x6CE30
 #define LANESTAGGER_STRAP_OVRD (1 << 6)
 #define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(port) _PORT3(port, _PORT_PCS_DW12_LN01_A, \
+#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
 		_PORT_PCS_DW12_LN01_B, \
 		_PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(port) _PORT3(port, _PORT_PCS_DW12_LN23_A, \
+#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
 		_PORT_PCS_DW12_LN23_B, \
 		_PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(port) _PORT3(port, _PORT_PCS_DW12_GRP_A, \
+#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
 		_PORT_PCS_DW12_GRP_B, \
 		_PORT_PCS_DW12_GRP_C)

@@ -1377,10 +1428,10 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW2_GRP_A 0x162D08
 #define _PORT_TX_DW2_GRP_B 0x6CD08
 #define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_GRP(port) _PORT3(port, _PORT_TX_DW2_GRP_A, \
+#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
 		_PORT_TX_DW2_GRP_B, \
 		_PORT_TX_DW2_GRP_C)
-#define BXT_PORT_TX_DW2_LN0(port) _PORT3(port, _PORT_TX_DW2_LN0_A, \
+#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
 		_PORT_TX_DW2_LN0_B, \
 		_PORT_TX_DW2_LN0_C)
 #define MARGIN_000_SHIFT 16
@@ -1394,10 +1445,10 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW3_GRP_A 0x162D0C
 #define _PORT_TX_DW3_GRP_B 0x6CD0C
 #define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_GRP(port) _PORT3(port, _PORT_TX_DW3_GRP_A, \
+#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
 		_PORT_TX_DW3_GRP_B, \
 		_PORT_TX_DW3_GRP_C)
-#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \
+#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
 		_PORT_TX_DW3_LN0_B, \
 		_PORT_TX_DW3_LN0_C)
 #define SCALE_DCOMP_METHOD (1 << 26)
@@ -1409,10 +1460,10 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW4_GRP_A 0x162D10
 #define _PORT_TX_DW4_GRP_B 0x6CD10
 #define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(port) _PORT3(port, _PORT_TX_DW4_LN0_A, \
+#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
 		_PORT_TX_DW4_LN0_B, \
 		_PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(port) _PORT3(port, _PORT_TX_DW4_GRP_A, \
+#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
 		_PORT_TX_DW4_GRP_B, \
 		_PORT_TX_DW4_GRP_C)
 #define DEEMPH_SHIFT 24
@@ -1423,17 +1474,17 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW14_LN0_C 0x6C938
 #define LATENCY_OPTIM_SHIFT 30
 #define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(port, lane) (_PORT3((port), _PORT_TX_DW14_LN0_A, \
+#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
 		_PORT_TX_DW14_LN0_B, \
 		_PORT_TX_DW14_LN0_C) + \
 		_BXT_LANE_OFFSET(lane))

 /* UAIMI scratch pad register 1 */
-#define UAIMI_SPR1 0x4F074
+#define UAIMI_SPR1 _MMIO(0x4F074)
 /* SKL VccIO mask */
 #define SKL_VCCIO_MASK 0x1
 /* SKL balance leg register */
-#define DISPIO_CR_TX_BMU_CR0 0x6C00C
+#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
 /* I_boost values */
 #define BALANCE_LEG_SHIFT(port) (8+3*(port))
 #define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
@@ -1450,7 +1501,7 @@ enum skl_disp_power_wells {
  * [0-15] @ 0x100000 gen6,vlv,chv
  * [0-31] @ 0x100000 gen7+
  */
-#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
+#define FENCE_REG(i) _MMIO(0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
 #define I830_FENCE_START_MASK 0x07f80000
 #define I830_FENCE_TILING_Y_SHIFT 12
 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
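The FENCE_REG expression encodes a fence file split across two banks: bit 3 of the index selects the bank (((i) & 8) << 9 turns index bit 3 into +0x1000) and the low three bits select the slot within it. Worked out directly from the macro as shown:

/* FENCE_REG(3):  (3 & 8) << 9  = 0x0000, (3 & 7) * 4  = 12 -> 0x200c */
/* FENCE_REG(10): (10 & 8) << 9 = 0x1000, (10 & 7) * 4 = 8  -> 0x3008 */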
@@ -1463,21 +1514,21 @@ enum skl_disp_power_wells {
 #define I915_FENCE_START_MASK 0x0ff00000
 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)

-#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8)
-#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4)
+#define FENCE_REG_965_LO(i) _MMIO(0x03000 + (i) * 8)
+#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
 #define I965_FENCE_PITCH_SHIFT 2
 #define I965_FENCE_TILING_Y_SHIFT 1
 #define I965_FENCE_REG_VALID (1<<0)
 #define I965_FENCE_MAX_PITCH_VAL 0x0400

-#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8)
-#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4)
+#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
+#define FENCE_REG_GEN6_HI(i) _MMIO(0x100000 + (i) * 8 + 4)
 #define GEN6_FENCE_PITCH_SHIFT 32
 #define GEN7_FENCE_MAX_PITCH_VAL 0x0800


 /* control register for cpu gtt access */
-#define TILECTL 0x101000
+#define TILECTL _MMIO(0x101000)
 #define TILECTL_SWZCTL (1 << 0)
 #define TILECTL_TLBPF (1 << 1)
 #define TILECTL_TLB_PREFETCH_DIS (1 << 2)
@@ -1486,30 +1537,30 @@ enum skl_disp_power_wells {
 /*
  * Instruction and interrupt control regs
  */
-#define PGTBL_CTL 0x02020
+#define PGTBL_CTL _MMIO(0x02020)
 #define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
 #define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
-#define PGTBL_ER 0x02024
+#define PGTBL_ER _MMIO(0x02024)
 #define PRB0_BASE (0x2030-0x30)
 #define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
 #define PRB2_BASE (0x2050-0x30) /* gen3 */
 #define SRB0_BASE (0x2100-0x30) /* gen2 */
 #define SRB1_BASE (0x2110-0x30) /* gen2 */
 #define SRB2_BASE (0x2120-0x30) /* 830 */
 #define SRB3_BASE (0x2130-0x30) /* 830 */
 #define RENDER_RING_BASE 0x02000
 #define BSD_RING_BASE 0x04000
 #define GEN6_BSD_RING_BASE 0x12000
 #define GEN8_BSD2_RING_BASE 0x1c000
 #define VEBOX_RING_BASE 0x1a000
 #define BLT_RING_BASE 0x22000
-#define RING_TAIL(base) ((base)+0x30)
-#define RING_HEAD(base) ((base)+0x34)
-#define RING_START(base) ((base)+0x38)
-#define RING_CTL(base) ((base)+0x3c)
-#define RING_SYNC_0(base) ((base)+0x40)
-#define RING_SYNC_1(base) ((base)+0x44)
-#define RING_SYNC_2(base) ((base)+0x48)
+#define RING_TAIL(base) _MMIO((base)+0x30)
+#define RING_HEAD(base) _MMIO((base)+0x34)
+#define RING_START(base) _MMIO((base)+0x38)
+#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_SYNC_0(base) _MMIO((base)+0x40)
+#define RING_SYNC_1(base) _MMIO((base)+0x44)
+#define RING_SYNC_2(base) _MMIO((base)+0x48)
 #define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
 #define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
 #define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
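Every engine exposes the same register file at its own mmio_base, so one base-relative macro serves all engines and only the base constant differs. A hedged usage sketch (function and message text are illustrative):

/* Illustrative: the same base-relative macro addresses any engine. */
static void dump_ring_heads(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_DRIVER("RCS head: 0x%08x\n",
			 I915_READ(RING_HEAD(RENDER_RING_BASE)));
	DRM_DEBUG_DRIVER("BCS head: 0x%08x\n",
			 I915_READ(RING_HEAD(BLT_RING_BASE)));
}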
@@ -1522,51 +1573,52 @@ enum skl_disp_power_wells {
 #define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
 #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
 #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
-#define GEN6_NOSYNC 0
-#define RING_PSMI_CTL(base) ((base)+0x50)
-#define RING_MAX_IDLE(base) ((base)+0x54)
-#define RING_HWS_PGA(base) ((base)+0x80)
-#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
-#define RING_RESET_CTL(base) ((base)+0xd0)
+#define GEN6_NOSYNC INVALID_MMIO_REG
+#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
+#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
+#define RING_HWS_PGA(base) _MMIO((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
+#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
 #define RESET_CTL_REQUEST_RESET (1 << 0)
 #define RESET_CTL_READY_TO_RESET (1 << 1)

-#define HSW_GTT_CACHE_EN 0x4024
+#define HSW_GTT_CACHE_EN _MMIO(0x4024)
 #define GTT_CACHE_EN_ALL 0xF0007FFF
-#define GEN7_WR_WATERMARK 0x4028
-#define GEN7_GFX_PRIO_CTRL 0x402C
-#define ARB_MODE 0x4030
+#define GEN7_WR_WATERMARK _MMIO(0x4028)
+#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
+#define ARB_MODE _MMIO(0x4030)
 #define ARB_MODE_SWIZZLE_SNB (1<<4)
 #define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define GEN7_GFX_PEND_TLB0 0x4034
-#define GEN7_GFX_PEND_TLB1 0x4038
+#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
+#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
 /* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
-#define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4)
+#define GEN7_LRA_LIMITS(i) _MMIO(0x403C + (i) * 4)
 #define GEN7_LRA_LIMITS_REG_NUM 13
-#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
-#define GEN7_GFX_MAX_REQ_COUNT 0x4074
+#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
+#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)

-#define GAMTARBMODE 0x04a08
+#define GAMTARBMODE _MMIO(0x04a08)
 #define ARB_MODE_BWGTLB_DISABLE (1<<9)
 #define ARB_MODE_SWIZZLE_BDW (1<<1)
-#define RENDER_HWS_PGA_GEN7 (0x04080)
-#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
+#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id)
 #define RING_FAULT_GTTSEL_MASK (1<<11)
 #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
 #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
 #define RING_FAULT_VALID (1<<0)
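RING_FAULT_SRCID and RING_FAULT_FAULT_TYPE are field extractors over the fault word. A decode then looks like the following fragment — assumed to sit inside a function where dev_priv and ring are in scope, with the message text illustrative:

/* Illustrative: decode one engine's fault register. */
u32 fault = I915_READ(RING_FAULT_REG(ring));

if (fault & RING_FAULT_VALID)
	DRM_DEBUG_DRIVER("fault: src %d, type %d, %s GTT\n",
			 RING_FAULT_SRCID(fault),
			 RING_FAULT_FAULT_TYPE(fault),
			 fault & RING_FAULT_GTTSEL_MASK ? "global" : "ppgtt");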
-#define DONE_REG 0x40b0
-#define GEN8_PRIVATE_PAT_LO 0x40e0
-#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4)
-#define BSD_HWS_PGA_GEN7 (0x04180)
-#define BLT_HWS_PGA_GEN7 (0x04280)
-#define VEBOX_HWS_PGA_GEN7 (0x04380)
-#define RING_ACTHD(base) ((base)+0x74)
-#define RING_ACTHD_UDW(base) ((base)+0x5c)
-#define RING_NOPID(base) ((base)+0x94)
-#define RING_IMR(base) ((base)+0xa8)
-#define RING_HWSTAM(base) ((base)+0x98)
-#define RING_TIMESTAMP(base) ((base)+0x358)
+#define DONE_REG _MMIO(0x40b0)
+#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
+#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
+#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
+#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
+#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
+#define RING_ACTHD(base) _MMIO((base)+0x74)
+#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
+#define RING_NOPID(base) _MMIO((base)+0x94)
+#define RING_IMR(base) _MMIO((base)+0xa8)
+#define RING_HWSTAM(base) _MMIO((base)+0x98)
+#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
+#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
 #define TAIL_ADDR 0x001FFFF8
 #define HEAD_WRAP_COUNT 0xFFE00000
 #define HEAD_WRAP_ONE 0x00200000
@@ -1583,57 +1635,65 @@ enum skl_disp_power_wells {
 #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
 #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */

-#define GEN7_TLB_RD_ADDR 0x4700
+#define GEN7_TLB_RD_ADDR _MMIO(0x4700)

 #if 0
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
-#define PRB1_TAIL 0x02040 /* 915+ only */
-#define PRB1_HEAD 0x02044 /* 915+ only */
-#define PRB1_START 0x02048 /* 915+ only */
-#define PRB1_CTL 0x0204c /* 915+ only */
+#define PRB0_TAIL _MMIO(0x2030)
+#define PRB0_HEAD _MMIO(0x2034)
+#define PRB0_START _MMIO(0x2038)
+#define PRB0_CTL _MMIO(0x203c)
+#define PRB1_TAIL _MMIO(0x2040) /* 915+ only */
+#define PRB1_HEAD _MMIO(0x2044) /* 915+ only */
+#define PRB1_START _MMIO(0x2048) /* 915+ only */
+#define PRB1_CTL _MMIO(0x204c) /* 915+ only */
 #endif
-#define IPEIR_I965 0x02064
-#define IPEHR_I965 0x02068
-#define GEN7_SC_INSTDONE 0x07100
-#define GEN7_SAMPLER_INSTDONE 0x0e160
-#define GEN7_ROW_INSTDONE 0x0e164
+#define IPEIR_I965 _MMIO(0x2064)
+#define IPEHR_I965 _MMIO(0x2068)
+#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
+#define GEN7_ROW_INSTDONE _MMIO(0xe164)
 #define I915_NUM_INSTDONE_REG 4
-#define RING_IPEIR(base) ((base)+0x64)
-#define RING_IPEHR(base) ((base)+0x68)
+#define RING_IPEIR(base) _MMIO((base)+0x64)
+#define RING_IPEHR(base) _MMIO((base)+0x68)
 /*
  * On GEN4, only the render ring INSTDONE exists and has a different
  * layout than the GEN7+ version.
  * The GEN2 counterpart of this register is GEN2_INSTDONE.
  */
-#define RING_INSTDONE(base) ((base)+0x6c)
-#define RING_INSTPS(base) ((base)+0x70)
-#define RING_DMA_FADD(base) ((base)+0x78)
-#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base) ((base)+0xc0)
-#define RING_MI_MODE(base) ((base)+0x9c)
-#define INSTPS 0x02070 /* 965+ only */
-#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */
-#define ACTHD_I965 0x02074
-#define HWS_PGA 0x02080
+#define RING_INSTDONE(base) _MMIO((base)+0x6c)
+#define RING_INSTPS(base) _MMIO((base)+0x70)
+#define RING_DMA_FADD(base) _MMIO((base)+0x78)
+#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
+#define RING_INSTPM(base) _MMIO((base)+0xc0)
+#define RING_MI_MODE(base) _MMIO((base)+0x9c)
+#define INSTPS _MMIO(0x2070) /* 965+ only */
+#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
+#define ACTHD_I965 _MMIO(0x2074)
+#define HWS_PGA _MMIO(0x2080)
 #define HWS_ADDRESS_MASK 0xfffff000
 #define HWS_START_ADDRESS_SHIFT 4
-#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
 #define PWRCTX_EN (1<<0)
-#define IPEIR 0x02088
-#define IPEHR 0x0208c
-#define GEN2_INSTDONE 0x02090
-#define NOPID 0x02094
-#define HWSTAM 0x02098
-#define DMA_FADD_I8XX 0x020d0
-#define RING_BBSTATE(base) ((base)+0x110)
-#define RING_BBADDR(base) ((base)+0x140)
-#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */
-
-#define ERROR_GEN6 0x040a0
-#define GEN7_ERR_INT 0x44040
+#define IPEIR _MMIO(0x2088)
+#define IPEHR _MMIO(0x208c)
+#define GEN2_INSTDONE _MMIO(0x2090)
+#define NOPID _MMIO(0x2094)
+#define HWSTAM _MMIO(0x2098)
+#define DMA_FADD_I8XX _MMIO(0x20d0)
+#define RING_BBSTATE(base) _MMIO((base)+0x110)
+#define RING_BB_PPGTT (1 << 5)
+#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
+#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
+#define RING_BBADDR(base) _MMIO((base)+0x140)
+#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
+
+#define ERROR_GEN6 _MMIO(0x40a0)
+#define GEN7_ERR_INT _MMIO(0x44040)
 #define ERR_INT_POISON (1<<31)
 #define ERR_INT_MMIO_UNCLAIMED (1<<13)
 #define ERR_INT_PIPE_CRC_DONE_C (1<<8)
@@ -1645,13 +1705,13 @@ enum skl_disp_power_wells {
 #define ERR_INT_FIFO_UNDERRUN_A (1<<0)
 #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))

-#define GEN8_FAULT_TLB_DATA0 0x04b10
-#define GEN8_FAULT_TLB_DATA1 0x04b14
+#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
+#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)

-#define FPGA_DBG 0x42300
+#define FPGA_DBG _MMIO(0x42300)
 #define FPGA_DBG_RM_NOCLAIM (1<<31)

-#define DERRMR 0x44050
+#define DERRMR _MMIO(0x44050)
 /* Note that HBLANK events are reserved on bdw+ */
 #define DERRMR_PIPEA_SCANLINE (1<<0)
 #define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
@@ -1675,29 +1735,29 @@ enum skl_disp_power_wells {
  * for various sorts of correct behavior. The top 16 bits of each are
  * the enables for writing to the corresponding low bit.
  */
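The comment above describes the "masked" register convention: bit N is only written when bit N+16 is set in the same write, so updates need no read-modify-write cycle. The helpers conventionally look like the following sketch (helper names assumed; the usage line is shown out of its enclosing function):

/* Illustrative: the write-enable sits 16 bits above the bit it guards. */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

/* e.g. set STOP_RING in MI_MODE without disturbing the other bits: */
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));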
-#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN _MMIO(0x2084)
 #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
-#define _3D_CHICKEN2 0x0208c
+#define _3D_CHICKEN2 _MMIO(0x208c)
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
-#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN3 _MMIO(0x2090)
 #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
 #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
 #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
 #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */

-#define MI_MODE 0x0209c
+#define MI_MODE _MMIO(0x209c)
 # define VS_TIMER_DISPATCH (1 << 6)
 # define MI_FLUSH_ENABLE (1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE (1 << 14)
 # define MODE_IDLE (1 << 9)
 # define STOP_RING (1 << 8)

-#define GEN6_GT_MODE 0x20d0
-#define GEN7_GT_MODE 0x7008
+#define GEN6_GT_MODE _MMIO(0x20d0)
+#define GEN7_GT_MODE _MMIO(0x7008)
 #define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
 #define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
 #define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
@@ -1707,9 +1767,9 @@ enum skl_disp_power_wells {
 #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
 #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))

-#define GFX_MODE 0x02520
-#define GFX_MODE_GEN7 0x0229c
-#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
+#define GFX_MODE _MMIO(0x2520)
+#define GFX_MODE_GEN7 _MMIO(0x229c)
+#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
 #define GFX_RUN_LIST_ENABLE (1<<15)
 #define GFX_INTERRUPT_STEERING (1<<14)
 #define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
@@ -1727,36 +1787,36 @@ enum skl_disp_power_wells {
 #define VLV_DISPLAY_BASE 0x180000
 #define VLV_MIPI_BASE VLV_DISPLAY_BASE

-#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
-#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
-#define SCPD0 0x0209c /* 915+ only */
-#define IER 0x020a0
-#define IIR 0x020a4
-#define IMR 0x020a8
-#define ISR 0x020ac
-#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
+#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
+#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
+#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define IER _MMIO(0x20a0)
+#define IIR _MMIO(0x20a4)
+#define IMR _MMIO(0x20a8)
+#define ISR _MMIO(0x20ac)
+#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
 #define GINT_DIS (1<<22)
 #define GCFG_DIS (1<<8)
-#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
-#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
-#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
-#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
-#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
-#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
-#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
+#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
 #define VLV_PCBR_ADDR_SHIFT 12

 #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
-#define EIR 0x020b0
-#define EMR 0x020b4
-#define ESR 0x020b8
+#define EIR _MMIO(0x20b0)
+#define EMR _MMIO(0x20b4)
+#define ESR _MMIO(0x20b8)
 #define GM45_ERROR_PAGE_TABLE (1<<5)
 #define GM45_ERROR_MEM_PRIV (1<<4)
 #define I915_ERROR_PAGE_TABLE (1<<4)
 #define GM45_ERROR_CP_PRIV (1<<3)
 #define I915_ERROR_MEMORY_REFRESH (1<<1)
 #define I915_ERROR_INSTRUCTION (1<<0)
-#define INSTPM 0x020c0
+#define INSTPM _MMIO(0x20c0)
 #define INSTPM_SELF_EN (1<<12) /* 915GM only */
 #define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
 					will not assert AGPBUSY# and will only
@@ -1764,14 +1824,14 @@ enum skl_disp_power_wells {
 #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
 #define INSTPM_TLB_INVALIDATE (1<<9)
 #define INSTPM_SYNC_FLUSH (1<<5)
-#define ACTHD 0x020c8
-#define MEM_MODE 0x020cc
+#define ACTHD _MMIO(0x20c8)
+#define MEM_MODE _MMIO(0x20cc)
 #define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
 #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
 #define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
-#define FW_BLC 0x020d8
-#define FW_BLC2 0x020dc
-#define FW_BLC_SELF 0x020e0 /* 915+ only */
+#define FW_BLC _MMIO(0x20d8)
+#define FW_BLC2 _MMIO(0x20dc)
+#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
 #define FW_BLC_SELF_EN_MASK (1<<31)
 #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
 #define FW_BLC_SELF_EN (1<<15) /* 945 only */
@@ -1779,7 +1839,7 @@ enum skl_disp_power_wells {
 #define MM_FIFO_WATERMARK 0x0001F000
 #define LM_BURST_LENGTH 0x00000700
 #define LM_FIFO_WATERMARK 0x0000001F
-#define MI_ARB_STATE 0x020e4 /* 915+ only */
+#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */

 /* Make render/texture TLB fetches lower priority than associated data
  * fetches. This is not turned on by default
@@ -1843,11 +1903,11 @@ enum skl_disp_power_wells {
 #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
 #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */

-#define MI_STATE 0x020e4 /* gen2 only */
+#define MI_STATE _MMIO(0x20e4) /* gen2 only */
 #define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
 #define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */

-#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
 #define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
 #define CM0_IZ_OPT_DISABLE (1<<6)
 #define CM0_ZR_OPT_DISABLE (1<<5)
@@ -1856,32 +1916,32 @@ enum skl_disp_power_wells {
 #define CM0_COLOR_EVICT_DISABLE (1<<3)
 #define CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define CM0_RC_OP_FLUSH_DISABLE (1<<0)
-#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
-#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
 #define GFX_FLSH_CNTL_EN (1<<0)
-#define ECOSKPD 0x021d0
+#define ECOSKPD _MMIO(0x21d0)
 #define ECO_GATING_CX_ONLY (1<<3)
 #define ECO_FLIP_DONE (1<<0)

-#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
+#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
 #define RC_OP_FLUSH_ENABLE (1<<0)
 #define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
-#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
 #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
 #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
 #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)

-#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
 #define GEN6_BLITTER_LOCK_SHIFT 16
 #define GEN6_BLITTER_FBC_NOTIFY (1<<3)

-#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
 #define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
 #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
 #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)

 /* Fuse readout registers for GT */
-#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
 #define CHV_FGT_DISABLE_SS0 (1 << 10)
 #define CHV_FGT_DISABLE_SS1 (1 << 11)
 #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
@@ -1893,7 +1953,7 @@ enum skl_disp_power_wells {
 #define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
 #define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)

-#define GEN8_FUSE2 0x9120
+#define GEN8_FUSE2 _MMIO(0x9120)
 #define GEN8_F2_SS_DIS_SHIFT 21
 #define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
 #define GEN8_F2_S_ENA_SHIFT 25
@@ -1902,22 +1962,22 @@ enum skl_disp_power_wells {
 #define GEN9_F2_SS_DIS_SHIFT 20
 #define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)

-#define GEN8_EU_DISABLE0 0x9134
+#define GEN8_EU_DISABLE0 _MMIO(0x9134)
 #define GEN8_EU_DIS0_S0_MASK 0xffffff
 #define GEN8_EU_DIS0_S1_SHIFT 24
 #define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)

-#define GEN8_EU_DISABLE1 0x9138
+#define GEN8_EU_DISABLE1 _MMIO(0x9138)
 #define GEN8_EU_DIS1_S1_MASK 0xffff
 #define GEN8_EU_DIS1_S2_SHIFT 16
 #define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)

-#define GEN8_EU_DISABLE2 0x913c
+#define GEN8_EU_DISABLE2 _MMIO(0x913c)
 #define GEN8_EU_DIS2_S2_MASK 0xff

-#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)

-#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
 #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
 #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
 #define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
@@ -1995,9 +2055,9 @@ enum skl_disp_power_wells {
 #define I915_ASLE_INTERRUPT (1<<0)
 #define I915_BSD_USER_INTERRUPT (1<<25)

-#define GEN6_BSD_RNCID 0x12198
+#define GEN6_BSD_RNCID _MMIO(0x12198)

-#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
 #define GEN7_FF_SCHED_MASK 0x0077070
 #define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
 #define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
@@ -2018,9 +2078,9 @@ enum skl_disp_power_wells {
  * Framebuffer compression (915+ only)
  */

-#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
-#define FBC_LL_BASE 0x03204 /* 4k page aligned */
-#define FBC_CONTROL 0x03208
+#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
+#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
+#define FBC_CONTROL _MMIO(0x3208)
 #define FBC_CTL_EN (1<<31)
 #define FBC_CTL_PERIODIC (1<<30)
 #define FBC_CTL_INTERVAL_SHIFT (16)
@@ -2028,14 +2088,14 @@ enum skl_disp_power_wells {
 #define FBC_CTL_C3_IDLE (1<<13)
 #define FBC_CTL_STRIDE_SHIFT (5)
 #define FBC_CTL_FENCENO_SHIFT (0)
-#define FBC_COMMAND 0x0320c
+#define FBC_COMMAND _MMIO(0x320c)
 #define FBC_CMD_COMPRESS (1<<0)
-#define FBC_STATUS 0x03210
+#define FBC_STATUS _MMIO(0x3210)
 #define FBC_STAT_COMPRESSING (1<<31)
 #define FBC_STAT_COMPRESSED (1<<30)
 #define FBC_STAT_MODIFIED (1<<29)
 #define FBC_STAT_CURRENT_LINE_SHIFT (0)
-#define FBC_CONTROL2 0x03214
+#define FBC_CONTROL2 _MMIO(0x3214)
 #define FBC_CTL_FENCE_DBL (0<<4)
 #define FBC_CTL_IDLE_IMM (0<<2)
 #define FBC_CTL_IDLE_FULL (1<<2)
@@ -2043,17 +2103,17 @@ enum skl_disp_power_wells {
 #define FBC_CTL_IDLE_DEBUG (3<<2)
 #define FBC_CTL_CPU_FENCE (1<<1)
 #define FBC_CTL_PLANE(plane) ((plane)<<0)
-#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
-#define FBC_TAG(i) (0x03300 + (i) * 4)
+#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
+#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)

-#define FBC_STATUS2 0x43214
+#define FBC_STATUS2 _MMIO(0x43214)
 #define FBC_COMPRESSION_MASK 0x7ff

 #define FBC_LL_SIZE (1536)

 /* Framebuffer compression for GM45+ */
-#define DPFC_CB_BASE 0x3200
-#define DPFC_CONTROL 0x3208
+#define DPFC_CB_BASE _MMIO(0x3200)
+#define DPFC_CONTROL _MMIO(0x3208)
 #define DPFC_CTL_EN (1<<31)
 #define DPFC_CTL_PLANE(plane) ((plane)<<30)
 #define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
@@ -2064,37 +2124,37 @@ enum skl_disp_power_wells {
 #define DPFC_CTL_LIMIT_1X (0<<6)
 #define DPFC_CTL_LIMIT_2X (1<<6)
 #define DPFC_CTL_LIMIT_4X (2<<6)
-#define DPFC_RECOMP_CTL 0x320c
+#define DPFC_RECOMP_CTL _MMIO(0x320c)
 #define DPFC_RECOMP_STALL_EN (1<<27)
 #define DPFC_RECOMP_STALL_WM_SHIFT (16)
 #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
 #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
 #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
-#define DPFC_STATUS 0x3210
+#define DPFC_STATUS _MMIO(0x3210)
 #define DPFC_INVAL_SEG_SHIFT (16)
 #define DPFC_INVAL_SEG_MASK (0x07ff0000)
 #define DPFC_COMP_SEG_SHIFT (0)
 #define DPFC_COMP_SEG_MASK (0x000003ff)
-#define DPFC_STATUS2 0x3214
-#define DPFC_FENCE_YOFF 0x3218
-#define DPFC_CHICKEN 0x3224
+#define DPFC_STATUS2 _MMIO(0x3214)
+#define DPFC_FENCE_YOFF _MMIO(0x3218)
+#define DPFC_CHICKEN _MMIO(0x3224)
 #define DPFC_HT_MODIFY (1<<31)

 /* Framebuffer compression for Ironlake */
-#define ILK_DPFC_CB_BASE 0x43200
-#define ILK_DPFC_CONTROL 0x43208
+#define ILK_DPFC_CB_BASE _MMIO(0x43200)
+#define ILK_DPFC_CONTROL _MMIO(0x43208)
 #define FBC_CTL_FALSE_COLOR (1<<10)
 /* Bits 28-8 are reserved */
 #define DPFC_RESERVED (0x1FFFFF00)
-#define ILK_DPFC_RECOMP_CTL 0x4320c
-#define ILK_DPFC_STATUS 0x43210
-#define ILK_DPFC_FENCE_YOFF 0x43218
-#define ILK_DPFC_CHICKEN 0x43224
-#define ILK_FBC_RT_BASE 0x2128
+#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
+#define ILK_DPFC_STATUS _MMIO(0x43210)
+#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
+#define ILK_DPFC_CHICKEN _MMIO(0x43224)
+#define ILK_FBC_RT_BASE _MMIO(0x2128)
 #define ILK_FBC_RT_VALID (1<<0)
 #define SNB_FBC_FRONT_BUFFER (1<<1)

-#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
 #define ILK_FBCQ_DIS (1<<22)
 #define ILK_PABSTRETCH_DIS (1<<21)

@@ -2104,31 +2164,31 @@ enum skl_disp_power_wells {
  *
  * The following two registers are of type GTTMMADR
  */
-#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_DPFC_CTL_SA _MMIO(0x100100)
 #define SNB_CPU_FENCE_ENABLE (1<<29)
-#define DPFC_CPU_FENCE_OFFSET 0x100104
+#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)

 /* Framebuffer compression for Ivybridge */
-#define IVB_FBC_RT_BASE 0x7020
+#define IVB_FBC_RT_BASE _MMIO(0x7020)

-#define IPS_CTL 0x43408
+#define IPS_CTL _MMIO(0x43408)
 #define IPS_ENABLE (1 << 31)

-#define MSG_FBC_REND_STATE 0x50380
+#define MSG_FBC_REND_STATE _MMIO(0x50380)
 #define FBC_REND_NUKE (1<<2)
 #define FBC_REND_CACHE_CLEAN (1<<1)

 /*
  * GPIO regs
  */
-#define GPIOA 0x5010
-#define GPIOB 0x5014
-#define GPIOC 0x5018
-#define GPIOD 0x501c
-#define GPIOE 0x5020
-#define GPIOF 0x5024
-#define GPIOG 0x5028
-#define GPIOH 0x502c
+#define GPIOA _MMIO(0x5010)
+#define GPIOB _MMIO(0x5014)
+#define GPIOC _MMIO(0x5018)
+#define GPIOD _MMIO(0x501c)
+#define GPIOE _MMIO(0x5020)
+#define GPIOF _MMIO(0x5024)
+#define GPIOG _MMIO(0x5028)
+#define GPIOH _MMIO(0x502c)
 # define GPIO_CLOCK_DIR_MASK (1 << 0)
 # define GPIO_CLOCK_DIR_IN (0 << 1)
 # define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -2144,7 +2204,7 @@ enum skl_disp_power_wells {
 # define GPIO_DATA_VAL_IN (1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE (1 << 13)

-#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
+#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
 #define GMBUS_RATE_100KHZ (0<<8)
 #define GMBUS_RATE_50KHZ (1<<8)
 #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
@@ -2163,7 +2223,7 @@ enum skl_disp_power_wells {
2163#define GMBUS_PIN_2_BXT 2 2223#define GMBUS_PIN_2_BXT 2
2164#define GMBUS_PIN_3_BXT 3 2224#define GMBUS_PIN_3_BXT 3
2165#define GMBUS_NUM_PINS 7 /* including 0 */ 2225#define GMBUS_NUM_PINS 7 /* including 0 */
2166#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */ 2226#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
2167#define GMBUS_SW_CLR_INT (1<<31) 2227#define GMBUS_SW_CLR_INT (1<<31)
2168#define GMBUS_SW_RDY (1<<30) 2228#define GMBUS_SW_RDY (1<<30)
2169#define GMBUS_ENT (1<<29) /* enable timeout */ 2229#define GMBUS_ENT (1<<29) /* enable timeout */
@@ -2177,7 +2237,7 @@ enum skl_disp_power_wells {
2177#define GMBUS_SLAVE_ADDR_SHIFT 1 2237#define GMBUS_SLAVE_ADDR_SHIFT 1
2178#define GMBUS_SLAVE_READ (1<<0) 2238#define GMBUS_SLAVE_READ (1<<0)
2179#define GMBUS_SLAVE_WRITE (0<<0) 2239#define GMBUS_SLAVE_WRITE (0<<0)
2180#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */ 2240#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
2181#define GMBUS_INUSE (1<<15) 2241#define GMBUS_INUSE (1<<15)
2182#define GMBUS_HW_WAIT_PHASE (1<<14) 2242#define GMBUS_HW_WAIT_PHASE (1<<14)
2183#define GMBUS_STALL_TIMEOUT (1<<13) 2243#define GMBUS_STALL_TIMEOUT (1<<13)
@@ -2185,14 +2245,14 @@ enum skl_disp_power_wells {
2185#define GMBUS_HW_RDY (1<<11) 2245#define GMBUS_HW_RDY (1<<11)
2186#define GMBUS_SATOER (1<<10) 2246#define GMBUS_SATOER (1<<10)
2187#define GMBUS_ACTIVE (1<<9) 2247#define GMBUS_ACTIVE (1<<9)
2188#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ 2248#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
2189#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ 2249#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
2190#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) 2250#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
2191#define GMBUS_NAK_EN (1<<3) 2251#define GMBUS_NAK_EN (1<<3)
2192#define GMBUS_IDLE_EN (1<<2) 2252#define GMBUS_IDLE_EN (1<<2)
2193#define GMBUS_HW_WAIT_EN (1<<1) 2253#define GMBUS_HW_WAIT_EN (1<<1)
2194#define GMBUS_HW_RDY_EN (1<<0) 2254#define GMBUS_HW_RDY_EN (1<<0)
2195#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */ 2255#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
2196#define GMBUS_2BYTE_INDEX_EN (1<<31) 2256#define GMBUS_2BYTE_INDEX_EN (1<<31)
2197 2257
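Because GMBUS0-GMBUS5 now expand to _MMIO(dev_priv->gpio_mmio_base + offset), they remain usable only where a dev_priv is in scope, exactly like the untyped versions they replace. A hedged sketch of a status poll against the GMBUS2 bits above, assuming the driver's I915_READ and wait_for helpers:

	/* wait up to 50 ms for the controller to report ready or a NAK */
	if (wait_for(I915_READ(GMBUS2) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
		return -ETIMEDOUT;
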
2198/* 2258/*
@@ -2201,11 +2261,11 @@ enum skl_disp_power_wells {
2201#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) 2261#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
2202#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) 2262#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
2203#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) 2263#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
2204#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) 2264#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
2205 2265
2206#define VGA0 0x6000 2266#define VGA0 _MMIO(0x6000)
2207#define VGA1 0x6004 2267#define VGA1 _MMIO(0x6004)
2208#define VGA_PD 0x6010 2268#define VGA_PD _MMIO(0x6010)
2209#define VGA0_PD_P2_DIV_4 (1 << 7) 2269#define VGA0_PD_P2_DIV_4 (1 << 7)
2210#define VGA0_PD_P1_DIV_2 (1 << 5) 2270#define VGA0_PD_P1_DIV_2 (1 << 5)
2211#define VGA0_PD_P1_SHIFT 0 2271#define VGA0_PD_P1_SHIFT 0
@@ -2241,9 +2301,9 @@ enum skl_disp_power_wells {
2241#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 2301#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
2242 2302
2243/* Additional CHV pll/phy registers */ 2303/* Additional CHV pll/phy registers */
2244#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) 2304#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
2245#define DPLL_PORTD_READY_MASK (0xf) 2305#define DPLL_PORTD_READY_MASK (0xf)
2246#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) 2306#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
2247#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27)) 2307#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
2248#define PHY_LDO_DELAY_0NS 0x0 2308#define PHY_LDO_DELAY_0NS 0x0
2249#define PHY_LDO_DELAY_200NS 0x1 2309#define PHY_LDO_DELAY_200NS 0x1
@@ -2254,7 +2314,7 @@ enum skl_disp_power_wells {
2254#define PHY_CH_DEEP_PSR 0x7 2314#define PHY_CH_DEEP_PSR 0x7
2255#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) 2315#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
2256#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) 2316#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
2257#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) 2317#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
2258#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) 2318#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
2259#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch)))) 2319#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
2260#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline)))) 2320#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
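
The (phy, ch) encodings above are plain shift arithmetic; a worked example using only the macros shown:

	/* PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, 1, 0)
	 *	= 0x7 << (6*1 + 3*0 + 2) = 0x7 << 8
	 * PHY_STATUS_CMN_LDO(0, 1)
	 *	= 1 << (6 - (6*0 + 3*1)) = 1 << 3
	 * i.e. each PHY consumes a 6-bit stride, each channel a 3-bit one. */
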
@@ -2300,7 +2360,7 @@ enum skl_disp_power_wells {
2300#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) 2360#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
2301#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) 2361#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
2302#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) 2362#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
2303#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) 2363#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
2304 2364
2305/* 2365/*
2306 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 2366 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2339,12 +2399,12 @@ enum skl_disp_power_wells {
2339#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 2399#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
2340#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 2400#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
2341 2401
2342#define _FPA0 0x06040 2402#define _FPA0 0x6040
2343#define _FPA1 0x06044 2403#define _FPA1 0x6044
2344#define _FPB0 0x06048 2404#define _FPB0 0x6048
2345#define _FPB1 0x0604c 2405#define _FPB1 0x604c
2346#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) 2406#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
2347#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) 2407#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
2348#define FP_N_DIV_MASK 0x003f0000 2408#define FP_N_DIV_MASK 0x003f0000
2349#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 2409#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
2350#define FP_N_DIV_SHIFT 16 2410#define FP_N_DIV_SHIFT 16
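
FP0/FP1 above use the per-pipe helper; a sketch under the assumption that _MMIO_PIPE simply composes the classic linear interpolation between the pipe-A and pipe-B offsets with the new wrapper (both helpers are defined earlier in the file, outside this excerpt):

	#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
	#define _MMIO_PIPE(pipe, a, b)	_MMIO(_PIPE(pipe, a, b))

so FP0(1) resolves to _MMIO(0x6040 + 1 * (0x6048 - 0x6040)) = _MMIO(0x6048), i.e. _FPB0.
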
@@ -2353,7 +2413,7 @@ enum skl_disp_power_wells {
2353#define FP_M2_DIV_MASK 0x0000003f 2413#define FP_M2_DIV_MASK 0x0000003f
2354#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff 2414#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
2355#define FP_M2_DIV_SHIFT 0 2415#define FP_M2_DIV_SHIFT 0
2356#define DPLL_TEST 0x606c 2416#define DPLL_TEST _MMIO(0x606c)
2357#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 2417#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
2358#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) 2418#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
2359#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) 2419#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
@@ -2364,12 +2424,12 @@ enum skl_disp_power_wells {
2364#define DPLLA_TEST_N_BYPASS (1 << 3) 2424#define DPLLA_TEST_N_BYPASS (1 << 3)
2365#define DPLLA_TEST_M_BYPASS (1 << 2) 2425#define DPLLA_TEST_M_BYPASS (1 << 2)
2366#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) 2426#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
2367#define D_STATE 0x6104 2427#define D_STATE _MMIO(0x6104)
2368#define DSTATE_GFX_RESET_I830 (1<<6) 2428#define DSTATE_GFX_RESET_I830 (1<<6)
2369#define DSTATE_PLL_D3_OFF (1<<3) 2429#define DSTATE_PLL_D3_OFF (1<<3)
2370#define DSTATE_GFX_CLOCK_GATING (1<<1) 2430#define DSTATE_GFX_CLOCK_GATING (1<<1)
2371#define DSTATE_DOT_CLOCK_GATING (1<<0) 2431#define DSTATE_DOT_CLOCK_GATING (1<<0)
2372#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) 2432#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
2373# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ 2433# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
2374# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ 2434# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
2375# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ 2435# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -2408,7 +2468,7 @@ enum skl_disp_power_wells {
2408# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ 2468# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
2409# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ 2469# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
2410 2470
2411#define RENCLK_GATE_D1 0x6204 2471#define RENCLK_GATE_D1 _MMIO(0x6204)
2412# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ 2472# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
2413# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ 2473# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
2414# define PC_FE_CLOCK_GATE_DISABLE (1 << 11) 2474# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
@@ -2472,35 +2532,35 @@ enum skl_disp_power_wells {
2472# define I965_FT_CLOCK_GATE_DISABLE (1 << 1) 2532# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
2473# define I965_DM_CLOCK_GATE_DISABLE (1 << 0) 2533# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
2474 2534
2475#define RENCLK_GATE_D2 0x6208 2535#define RENCLK_GATE_D2 _MMIO(0x6208)
2476#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) 2536#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
2477#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) 2537#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
2478#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) 2538#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
2479 2539
2480#define VDECCLK_GATE_D 0x620C /* g4x only */ 2540#define VDECCLK_GATE_D _MMIO(0x620C) /* g4x only */
2481#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4) 2541#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
2482 2542
2483#define RAMCLK_GATE_D 0x6210 /* CRL only */ 2543#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */
2484#define DEUC 0x6214 /* CRL only */ 2544#define DEUC _MMIO(0x6214) /* CRL only */
2485 2545
2486#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) 2546#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
2487#define FW_CSPWRDWNEN (1<<15) 2547#define FW_CSPWRDWNEN (1<<15)
2488 2548
2489#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) 2549#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
2490 2550
2491#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508) 2551#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
2492#define CDCLK_FREQ_SHIFT 4 2552#define CDCLK_FREQ_SHIFT 4
2493#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) 2553#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
2494#define CZCLK_FREQ_MASK 0xf 2554#define CZCLK_FREQ_MASK 0xf
2495 2555
2496#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C) 2556#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
2497#define PFI_CREDIT_63 (9 << 28) /* chv only */ 2557#define PFI_CREDIT_63 (9 << 28) /* chv only */
2498#define PFI_CREDIT_31 (8 << 28) /* chv only */ 2558#define PFI_CREDIT_31 (8 << 28) /* chv only */
2499#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */ 2559#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
2500#define PFI_CREDIT_RESEND (1 << 27) 2560#define PFI_CREDIT_RESEND (1 << 27)
2501#define VGA_FAST_MODE_DISABLE (1 << 14) 2561#define VGA_FAST_MODE_DISABLE (1 << 14)
2502 2562
2503#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) 2563#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
2504 2564
2505/* 2565/*
2506 * Palette regs 2566 * Palette regs
@@ -2508,8 +2568,8 @@ enum skl_disp_power_wells {
2508#define PALETTE_A_OFFSET 0xa000 2568#define PALETTE_A_OFFSET 0xa000
2509#define PALETTE_B_OFFSET 0xa800 2569#define PALETTE_B_OFFSET 0xa800
2510#define CHV_PALETTE_C_OFFSET 0xc000 2570#define CHV_PALETTE_C_OFFSET 0xc000
2511#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \ 2571#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
2512 dev_priv->info.display_mmio_offset + (i) * 4) 2572 dev_priv->info.display_mmio_offset + (i) * 4)
2513 2573
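A worked example of the PALETTE() arithmetic above, assuming palette_offsets[PIPE_B] carries PALETTE_B_OFFSET and a zero display_mmio_offset:

	/* PALETTE(PIPE_B, 7) = _MMIO(0xa800 + 0 + 7 * 4) = _MMIO(0xa81c),
	 * one 32-bit LUT entry per index */
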
2514/* MCH MMIO space */ 2574/* MCH MMIO space */
2515 2575
@@ -2527,37 +2587,37 @@ enum skl_disp_power_wells {
2527 2587
2528#define MCHBAR_MIRROR_BASE_SNB 0x140000 2588#define MCHBAR_MIRROR_BASE_SNB 0x140000
2529 2589
2530#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34) 2590#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
2531#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48) 2591#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
2532#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16) 2592#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
2533#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4) 2593#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
2534 2594
2535/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */ 2595/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
2536#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) 2596#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
2537 2597
2538/* 915-945 and GM965 MCH register controlling DRAM channel access */ 2598/* 915-945 and GM965 MCH register controlling DRAM channel access */
2539#define DCC 0x10200 2599#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
2540#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 2600#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
2541#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) 2601#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
2542#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 2602#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
2543#define DCC_ADDRESSING_MODE_MASK (3 << 0) 2603#define DCC_ADDRESSING_MODE_MASK (3 << 0)
2544#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 2604#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
2545#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 2605#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
2546#define DCC2 0x10204 2606#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
2547#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20) 2607#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
2548 2608
2549/* Pineview MCH register contains DDR3 setting */ 2609/* Pineview MCH register contains DDR3 setting */
2550#define CSHRDDR3CTL 0x101a8 2610#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
2551#define CSHRDDR3CTL_DDR3 (1 << 2) 2611#define CSHRDDR3CTL_DDR3 (1 << 2)
2552 2612
2553/* 965 MCH register controlling DRAM channel configuration */ 2613/* 965 MCH register controlling DRAM channel configuration */
2554#define C0DRB3 0x10206 2614#define C0DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x206)
2555#define C1DRB3 0x10606 2615#define C1DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x606)
2556 2616
2557/* snb MCH registers for reading the DRAM channel configuration */ 2617/* snb MCH registers for reading the DRAM channel configuration */
2558#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) 2618#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
2559#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) 2619#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
2560#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) 2620#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
2561#define MAD_DIMM_ECC_MASK (0x3 << 24) 2621#define MAD_DIMM_ECC_MASK (0x3 << 24)
2562#define MAD_DIMM_ECC_OFF (0x0 << 24) 2622#define MAD_DIMM_ECC_OFF (0x0 << 24)
2563#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) 2623#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
@@ -2577,14 +2637,14 @@ enum skl_disp_power_wells {
2577#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) 2637#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
2578 2638
2579/* snb MCH registers for priority tuning */ 2639/* snb MCH registers for priority tuning */
2580#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) 2640#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
2581#define MCH_SSKPD_WM0_MASK 0x3f 2641#define MCH_SSKPD_WM0_MASK 0x3f
2582#define MCH_SSKPD_WM0_VAL 0xc 2642#define MCH_SSKPD_WM0_VAL 0xc
2583 2643
2584#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) 2644#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
2585 2645
2586/* Clocking configuration register */ 2646/* Clocking configuration register */
2587#define CLKCFG 0x10c00 2647#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
2588#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ 2648#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
2589#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ 2649#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
2590#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ 2650#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
@@ -2600,26 +2660,26 @@ enum skl_disp_power_wells {
2600#define CLKCFG_MEM_800 (3 << 4) 2660#define CLKCFG_MEM_800 (3 << 4)
2601#define CLKCFG_MEM_MASK (7 << 4) 2661#define CLKCFG_MEM_MASK (7 << 4)
2602 2662
2603#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38) 2663#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
2604#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f) 2664#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
2605 2665
2606#define TSC1 0x11001 2666#define TSC1 _MMIO(0x11001)
2607#define TSE (1<<0) 2667#define TSE (1<<0)
2608#define TR1 0x11006 2668#define TR1 _MMIO(0x11006)
2609#define TSFS 0x11020 2669#define TSFS _MMIO(0x11020)
2610#define TSFS_SLOPE_MASK 0x0000ff00 2670#define TSFS_SLOPE_MASK 0x0000ff00
2611#define TSFS_SLOPE_SHIFT 8 2671#define TSFS_SLOPE_SHIFT 8
2612#define TSFS_INTR_MASK 0x000000ff 2672#define TSFS_INTR_MASK 0x000000ff
2613 2673
2614#define CRSTANDVID 0x11100 2674#define CRSTANDVID _MMIO(0x11100)
2615#define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ 2675#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
2616#define PXVFREQ_PX_MASK 0x7f000000 2676#define PXVFREQ_PX_MASK 0x7f000000
2617#define PXVFREQ_PX_SHIFT 24 2677#define PXVFREQ_PX_SHIFT 24
2618#define VIDFREQ_BASE 0x11110 2678#define VIDFREQ_BASE _MMIO(0x11110)
2619#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ 2679#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
2620#define VIDFREQ2 0x11114 2680#define VIDFREQ2 _MMIO(0x11114)
2621#define VIDFREQ3 0x11118 2681#define VIDFREQ3 _MMIO(0x11118)
2622#define VIDFREQ4 0x1111c 2682#define VIDFREQ4 _MMIO(0x1111c)
2623#define VIDFREQ_P0_MASK 0x1f000000 2683#define VIDFREQ_P0_MASK 0x1f000000
2624#define VIDFREQ_P0_SHIFT 24 2684#define VIDFREQ_P0_SHIFT 24
2625#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 2685#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
@@ -2631,8 +2691,8 @@ enum skl_disp_power_wells {
2631#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 2691#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
2632#define VIDFREQ_P1_CSCLK_SHIFT 4 2692#define VIDFREQ_P1_CSCLK_SHIFT 4
2633#define VIDFREQ_P1_CRCLK_MASK 0x0000000f 2693#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
2634#define INTTOEXT_BASE_ILK 0x11300 2694#define INTTOEXT_BASE_ILK _MMIO(0x11300)
2635#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ 2695#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
2636#define INTTOEXT_MAP3_SHIFT 24 2696#define INTTOEXT_MAP3_SHIFT 24
2637#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) 2697#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
2638#define INTTOEXT_MAP2_SHIFT 16 2698#define INTTOEXT_MAP2_SHIFT 16
@@ -2641,7 +2701,7 @@ enum skl_disp_power_wells {
2641#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) 2701#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
2642#define INTTOEXT_MAP0_SHIFT 0 2702#define INTTOEXT_MAP0_SHIFT 0
2643#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) 2703#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
2644#define MEMSWCTL 0x11170 /* Ironlake only */ 2704#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
2645#define MEMCTL_CMD_MASK 0xe000 2705#define MEMCTL_CMD_MASK 0xe000
2646#define MEMCTL_CMD_SHIFT 13 2706#define MEMCTL_CMD_SHIFT 13
2647#define MEMCTL_CMD_RCLK_OFF 0 2707#define MEMCTL_CMD_RCLK_OFF 0
@@ -2656,8 +2716,8 @@ enum skl_disp_power_wells {
2656#define MEMCTL_FREQ_SHIFT 8 2716#define MEMCTL_FREQ_SHIFT 8
2657#define MEMCTL_SFCAVM (1<<7) 2717#define MEMCTL_SFCAVM (1<<7)
2658#define MEMCTL_TGT_VID_MASK 0x007f 2718#define MEMCTL_TGT_VID_MASK 0x007f
2659#define MEMIHYST 0x1117c 2719#define MEMIHYST _MMIO(0x1117c)
2660#define MEMINTREN 0x11180 /* 16 bits */ 2720#define MEMINTREN _MMIO(0x11180) /* 16 bits */
2661#define MEMINT_RSEXIT_EN (1<<8) 2721#define MEMINT_RSEXIT_EN (1<<8)
2662#define MEMINT_CX_SUPR_EN (1<<7) 2722#define MEMINT_CX_SUPR_EN (1<<7)
2663#define MEMINT_CONT_BUSY_EN (1<<6) 2723#define MEMINT_CONT_BUSY_EN (1<<6)
@@ -2667,7 +2727,7 @@ enum skl_disp_power_wells {
2667#define MEMINT_UP_EVAL_EN (1<<2) 2727#define MEMINT_UP_EVAL_EN (1<<2)
2668#define MEMINT_DOWN_EVAL_EN (1<<1) 2728#define MEMINT_DOWN_EVAL_EN (1<<1)
2669#define MEMINT_SW_CMD_EN (1<<0) 2729#define MEMINT_SW_CMD_EN (1<<0)
2670#define MEMINTRSTR 0x11182 /* 16 bits */ 2730#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
2671#define MEM_RSEXIT_MASK 0xc000 2731#define MEM_RSEXIT_MASK 0xc000
2672#define MEM_RSEXIT_SHIFT 14 2732#define MEM_RSEXIT_SHIFT 14
2673#define MEM_CONT_BUSY_MASK 0x3000 2733#define MEM_CONT_BUSY_MASK 0x3000
@@ -2687,7 +2747,7 @@ enum skl_disp_power_wells {
2687#define MEM_INT_STEER_CMR 1 2747#define MEM_INT_STEER_CMR 1
2688#define MEM_INT_STEER_SMI 2 2748#define MEM_INT_STEER_SMI 2
2689#define MEM_INT_STEER_SCI 3 2749#define MEM_INT_STEER_SCI 3
2690#define MEMINTRSTS 0x11184 2750#define MEMINTRSTS _MMIO(0x11184)
2691#define MEMINT_RSEXIT (1<<7) 2751#define MEMINT_RSEXIT (1<<7)
2692#define MEMINT_CONT_BUSY (1<<6) 2752#define MEMINT_CONT_BUSY (1<<6)
2693#define MEMINT_AVG_BUSY (1<<5) 2753#define MEMINT_AVG_BUSY (1<<5)
@@ -2696,7 +2756,7 @@ enum skl_disp_power_wells {
2696#define MEMINT_UP_EVAL (1<<2) 2756#define MEMINT_UP_EVAL (1<<2)
2697#define MEMINT_DOWN_EVAL (1<<1) 2757#define MEMINT_DOWN_EVAL (1<<1)
2698#define MEMINT_SW_CMD (1<<0) 2758#define MEMINT_SW_CMD (1<<0)
2699#define MEMMODECTL 0x11190 2759#define MEMMODECTL _MMIO(0x11190)
2700#define MEMMODE_BOOST_EN (1<<31) 2760#define MEMMODE_BOOST_EN (1<<31)
2701#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ 2761#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
2702#define MEMMODE_BOOST_FREQ_SHIFT 24 2762#define MEMMODE_BOOST_FREQ_SHIFT 24
@@ -2713,8 +2773,8 @@ enum skl_disp_power_wells {
2713#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ 2773#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
2714#define MEMMODE_FMAX_SHIFT 4 2774#define MEMMODE_FMAX_SHIFT 4
2715#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ 2775#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
2716#define RCBMAXAVG 0x1119c 2776#define RCBMAXAVG _MMIO(0x1119c)
2717#define MEMSWCTL2 0x1119e /* Cantiga only */ 2777#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
2718#define SWMEMCMD_RENDER_OFF (0 << 13) 2778#define SWMEMCMD_RENDER_OFF (0 << 13)
2719#define SWMEMCMD_RENDER_ON (1 << 13) 2779#define SWMEMCMD_RENDER_ON (1 << 13)
2720#define SWMEMCMD_SWFREQ (2 << 13) 2780#define SWMEMCMD_SWFREQ (2 << 13)
@@ -2726,11 +2786,11 @@ enum skl_disp_power_wells {
2726#define SWFREQ_MASK 0x0380 /* P0-7 */ 2786#define SWFREQ_MASK 0x0380 /* P0-7 */
2727#define SWFREQ_SHIFT 7 2787#define SWFREQ_SHIFT 7
2728#define TARVID_MASK 0x001f 2788#define TARVID_MASK 0x001f
2729#define MEMSTAT_CTG 0x111a0 2789#define MEMSTAT_CTG _MMIO(0x111a0)
2730#define RCBMINAVG 0x111a0 2790#define RCBMINAVG _MMIO(0x111a0)
2731#define RCUPEI 0x111b0 2791#define RCUPEI _MMIO(0x111b0)
2732#define RCDNEI 0x111b4 2792#define RCDNEI _MMIO(0x111b4)
2733#define RSTDBYCTL 0x111b8 2793#define RSTDBYCTL _MMIO(0x111b8)
2734#define RS1EN (1<<31) 2794#define RS1EN (1<<31)
2735#define RS2EN (1<<30) 2795#define RS2EN (1<<30)
2736#define RS3EN (1<<29) 2796#define RS3EN (1<<29)
@@ -2774,10 +2834,10 @@ enum skl_disp_power_wells {
2774#define RS_CSTATE_C367_RS2 (3<<4) 2834#define RS_CSTATE_C367_RS2 (3<<4)
2775#define REDSAVES (1<<3) /* no context save if was idle during rs0 */ 2835#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
2776#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ 2836#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
2777#define VIDCTL 0x111c0 2837#define VIDCTL _MMIO(0x111c0)
2778#define VIDSTS 0x111c8 2838#define VIDSTS _MMIO(0x111c8)
2779#define VIDSTART 0x111cc /* 8 bits */ 2839#define VIDSTART _MMIO(0x111cc) /* 8 bits */
2780#define MEMSTAT_ILK 0x111f8 2840#define MEMSTAT_ILK _MMIO(0x111f8)
2781#define MEMSTAT_VID_MASK 0x7f00 2841#define MEMSTAT_VID_MASK 0x7f00
2782#define MEMSTAT_VID_SHIFT 8 2842#define MEMSTAT_VID_SHIFT 8
2783#define MEMSTAT_PSTATE_MASK 0x00f8 2843#define MEMSTAT_PSTATE_MASK 0x00f8
@@ -2788,55 +2848,55 @@ enum skl_disp_power_wells {
2788#define MEMSTAT_SRC_CTL_TRB 1 2848#define MEMSTAT_SRC_CTL_TRB 1
2789#define MEMSTAT_SRC_CTL_THM 2 2849#define MEMSTAT_SRC_CTL_THM 2
2790#define MEMSTAT_SRC_CTL_STDBY 3 2850#define MEMSTAT_SRC_CTL_STDBY 3
2791#define RCPREVBSYTUPAVG 0x113b8 2851#define RCPREVBSYTUPAVG _MMIO(0x113b8)
2792#define RCPREVBSYTDNAVG 0x113bc 2852#define RCPREVBSYTDNAVG _MMIO(0x113bc)
2793#define PMMISC 0x11214 2853#define PMMISC _MMIO(0x11214)
2794#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ 2854#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
2795#define SDEW 0x1124c 2855#define SDEW _MMIO(0x1124c)
2796#define CSIEW0 0x11250 2856#define CSIEW0 _MMIO(0x11250)
2797#define CSIEW1 0x11254 2857#define CSIEW1 _MMIO(0x11254)
2798#define CSIEW2 0x11258 2858#define CSIEW2 _MMIO(0x11258)
2799#define PEW(i) (0x1125c + (i) * 4) /* 5 registers */ 2859#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
2800#define DEW(i) (0x11270 + (i) * 4) /* 3 registers */ 2860#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
2801#define MCHAFE 0x112c0 2861#define MCHAFE _MMIO(0x112c0)
2802#define CSIEC 0x112e0 2862#define CSIEC _MMIO(0x112e0)
2803#define DMIEC 0x112e4 2863#define DMIEC _MMIO(0x112e4)
2804#define DDREC 0x112e8 2864#define DDREC _MMIO(0x112e8)
2805#define PEG0EC 0x112ec 2865#define PEG0EC _MMIO(0x112ec)
2806#define PEG1EC 0x112f0 2866#define PEG1EC _MMIO(0x112f0)
2807#define GFXEC 0x112f4 2867#define GFXEC _MMIO(0x112f4)
2808#define RPPREVBSYTUPAVG 0x113b8 2868#define RPPREVBSYTUPAVG _MMIO(0x113b8)
2809#define RPPREVBSYTDNAVG 0x113bc 2869#define RPPREVBSYTDNAVG _MMIO(0x113bc)
2810#define ECR 0x11600 2870#define ECR _MMIO(0x11600)
2811#define ECR_GPFE (1<<31) 2871#define ECR_GPFE (1<<31)
2812#define ECR_IMONE (1<<30) 2872#define ECR_IMONE (1<<30)
2813#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ 2873#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
2814#define OGW0 0x11608 2874#define OGW0 _MMIO(0x11608)
2815#define OGW1 0x1160c 2875#define OGW1 _MMIO(0x1160c)
2816#define EG0 0x11610 2876#define EG0 _MMIO(0x11610)
2817#define EG1 0x11614 2877#define EG1 _MMIO(0x11614)
2818#define EG2 0x11618 2878#define EG2 _MMIO(0x11618)
2819#define EG3 0x1161c 2879#define EG3 _MMIO(0x1161c)
2820#define EG4 0x11620 2880#define EG4 _MMIO(0x11620)
2821#define EG5 0x11624 2881#define EG5 _MMIO(0x11624)
2822#define EG6 0x11628 2882#define EG6 _MMIO(0x11628)
2823#define EG7 0x1162c 2883#define EG7 _MMIO(0x1162c)
2824#define PXW(i) (0x11664 + (i) * 4) /* 4 registers */ 2884#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
2825#define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */ 2885#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
2826#define LCFUSE02 0x116c0 2886#define LCFUSE02 _MMIO(0x116c0)
2827#define LCFUSE_HIV_MASK 0x000000ff 2887#define LCFUSE_HIV_MASK 0x000000ff
2828#define CSIPLL0 0x12c10 2888#define CSIPLL0 _MMIO(0x12c10)
2829#define DDRMPLL1 0x12c20 2889#define DDRMPLL1 _MMIO(0x12c20)
2830#define PEG_BAND_GAP_DATA 0x14d68 2890#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
2831 2891
2832#define GEN6_GT_THREAD_STATUS_REG 0x13805c 2892#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
2833#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 2893#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
2834 2894
2835#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) 2895#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
2836#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070) 2896#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
2837#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) 2897#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
2838#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) 2898#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
2839#define BXT_RP_STATE_CAP 0x138170 2899#define BXT_RP_STATE_CAP _MMIO(0x138170)
2840 2900
2841#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) 2901#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
2842#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) 2902#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
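
The interval helpers above are fixed-point conversions from microseconds to hardware tick units:

	/* INTERVAL_1_28_US(us) = us * 100 / 128, roughly us / 1.28:
	 *	INTERVAL_1_28_US(128) = 12800 >> 7 = 100 ticks
	 * INTERVAL_1_33_US(us) = us * 3 / 4, roughly us / 1.33:
	 *	INTERVAL_1_33_US(133) = 399 >> 2 = 99 ticks */
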
@@ -2850,7 +2910,7 @@ enum skl_disp_power_wells {
2850/* 2910/*
2851 * Logical Context regs 2911 * Logical Context regs
2852 */ 2912 */
2853#define CCID 0x2180 2913#define CCID _MMIO(0x2180)
2854#define CCID_EN (1<<0) 2914#define CCID_EN (1<<0)
2855/* 2915/*
2856 * Notes on SNB/IVB/VLV context size: 2916 * Notes on SNB/IVB/VLV context size:
@@ -2865,7 +2925,7 @@ enum skl_disp_power_wells {
2865 * - GT1 size just indicates how much of render context 2925 * - GT1 size just indicates how much of render context
2866 * doesn't need saving on GT1 2926 * doesn't need saving on GT1
2867 */ 2927 */
2868#define CXT_SIZE 0x21a0 2928#define CXT_SIZE _MMIO(0x21a0)
2869#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f) 2929#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
2870#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f) 2930#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
2871#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f) 2931#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
@@ -2874,7 +2934,7 @@ enum skl_disp_power_wells {
2874#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ 2934#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
2875 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ 2935 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
2876 GEN6_CXT_PIPELINE_SIZE(cxt_reg)) 2936 GEN6_CXT_PIPELINE_SIZE(cxt_reg))
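
A worked decode of the GEN6 context-size fields above, each 6 bits wide at a descending shift:

	/* for cxt_reg = 0x04102418:
	 *	GEN6_CXT_POWER_SIZE()  = (cxt_reg >> 24) & 0x3f = 0x04
	 *	GEN6_CXT_RING_SIZE()   = (cxt_reg >> 18) & 0x3f = 0x04
	 *	GEN6_CXT_RENDER_SIZE() = (cxt_reg >> 12) & 0x3f = 0x02 */
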
2877#define GEN7_CXT_SIZE 0x21a8 2937#define GEN7_CXT_SIZE _MMIO(0x21a8)
2878#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f) 2938#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
2879#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7) 2939#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
2880#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f) 2940#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
@@ -2894,23 +2954,30 @@ enum skl_disp_power_wells {
2894/* Same as Haswell, but 72064 bytes now. */ 2954/* Same as Haswell, but 72064 bytes now. */
2895#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) 2955#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
2896 2956
2897#define CHV_CLK_CTL1 0x101100 2957#define CHV_CLK_CTL1 _MMIO(0x101100)
2898#define VLV_CLK_CTL2 0x101104 2958#define VLV_CLK_CTL2 _MMIO(0x101104)
2899#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 2959#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
2900 2960
2901/* 2961/*
2902 * Overlay regs 2962 * Overlay regs
2903 */ 2963 */
2904 2964
2905#define OVADD 0x30000 2965#define OVADD _MMIO(0x30000)
2906#define DOVSTA 0x30008 2966#define DOVSTA _MMIO(0x30008)
2907#define OC_BUF (0x3<<20) 2967#define OC_BUF (0x3<<20)
2908#define OGAMC5 0x30010 2968#define OGAMC5 _MMIO(0x30010)
2909#define OGAMC4 0x30014 2969#define OGAMC4 _MMIO(0x30014)
2910#define OGAMC3 0x30018 2970#define OGAMC3 _MMIO(0x30018)
2911#define OGAMC2 0x3001c 2971#define OGAMC2 _MMIO(0x3001c)
2912#define OGAMC1 0x30020 2972#define OGAMC1 _MMIO(0x30020)
2913#define OGAMC0 0x30024 2973#define OGAMC0 _MMIO(0x30024)
2974
2975/*
2976 * GEN9 clock gating regs
2977 */
2978#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
2979#define PWM2_GATING_DIS (1 << 14)
2980#define PWM1_GATING_DIS (1 << 13)
2914 2981
2915/* 2982/*
2916 * Display engine regs 2983 * Display engine regs
@@ -2970,28 +3037,18 @@ enum skl_disp_power_wells {
2970#define _PIPE_CRC_RES_4_B_IVB 0x61070 3037#define _PIPE_CRC_RES_4_B_IVB 0x61070
2971#define _PIPE_CRC_RES_5_B_IVB 0x61074 3038#define _PIPE_CRC_RES_5_B_IVB 0x61074
2972 3039
2973#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) 3040#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A)
2974#define PIPE_CRC_RES_1_IVB(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) 3041#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB)
2976#define PIPE_CRC_RES_2_IVB(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) 3042#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB)
2978#define PIPE_CRC_RES_3_IVB(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) 3043#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB)
2980#define PIPE_CRC_RES_4_IVB(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) 3044#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB)
2982#define PIPE_CRC_RES_5_IVB(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) 3045#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB)
2984 3046
2985#define PIPE_CRC_RES_RED(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A) 3047#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A)
2987#define PIPE_CRC_RES_GREEN(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A) 3048#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A)
2989#define PIPE_CRC_RES_BLUE(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A) 3049#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A)
2991#define PIPE_CRC_RES_RES1_I915(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915) 3050#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
2993#define PIPE_CRC_RES_RES2_G4X(pipe) _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X) 3051#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
2995 3052
2996/* Pipe A timing regs */ 3053/* Pipe A timing regs */
2997#define _HTOTAL_A 0x60000 3054#define _HTOTAL_A 0x60000
@@ -3023,20 +3080,20 @@ enum skl_disp_power_wells {
3023#define CHV_TRANSCODER_C_OFFSET 0x63000 3080#define CHV_TRANSCODER_C_OFFSET 0x63000
3024#define TRANSCODER_EDP_OFFSET 0x6f000 3081#define TRANSCODER_EDP_OFFSET 0x6f000
3025 3082
3026#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ 3083#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
3027 dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ 3084 dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
3028 dev_priv->info.display_mmio_offset) 3085 dev_priv->info.display_mmio_offset)
3029 3086
3030#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) 3087#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
3031#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) 3088#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
3032#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) 3089#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A)
3033#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) 3090#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A)
3034#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) 3091#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A)
3035#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) 3092#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A)
3036#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) 3093#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A)
3037#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) 3094#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A)
3038#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) 3095#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
3039#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A) 3096#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
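
A worked example of _MMIO_TRANS2 above, assuming trans_offsets[TRANSCODER_A] is 0x60000 and trans_offsets[TRANSCODER_B] is 0x61000 (both populated from the *_OFFSET tables outside this excerpt):

	/* HTOTAL(TRANSCODER_B)
	 *	= _MMIO(0x61000 - 0x60000 + _HTOTAL_A + display_mmio_offset)
	 *	= _MMIO(0x61000 + display_mmio_offset) */
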
3040 3097
3041/* VLV eDP PSR registers */ 3098/* VLV eDP PSR registers */
3042#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090) 3099#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
@@ -3052,14 +3109,14 @@ enum skl_disp_power_wells {
3052#define VLV_EDP_PSR_DBL_FRAME (1<<10) 3109#define VLV_EDP_PSR_DBL_FRAME (1<<10)
3053#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16) 3110#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
3054#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16 3111#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
3055#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB) 3112#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
3056 3113
3057#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0) 3114#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
3058#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0) 3115#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
3059#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30) 3116#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
3060#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31) 3117#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
3061#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30) 3118#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
3062#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB) 3119#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
3063 3120
3064#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094) 3121#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
3065#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094) 3122#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
@@ -3072,11 +3129,12 @@ enum skl_disp_power_wells {
3072#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0) 3129#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
3073#define VLV_EDP_PSR_EXIT (5<<0) 3130#define VLV_EDP_PSR_EXIT (5<<0)
3074#define VLV_EDP_PSR_IN_TRANS (1<<7) 3131#define VLV_EDP_PSR_IN_TRANS (1<<7)
3075#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB) 3132#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
3076 3133
3077/* HSW+ eDP PSR registers */ 3134/* HSW+ eDP PSR registers */
3078#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) 3135#define HSW_EDP_PSR_BASE 0x64800
3079#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) 3136#define BDW_EDP_PSR_BASE 0x6f800
3137#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
3080#define EDP_PSR_ENABLE (1<<31) 3138#define EDP_PSR_ENABLE (1<<31)
3081#define BDW_PSR_SINGLE_FRAME (1<<30) 3139#define BDW_PSR_SINGLE_FRAME (1<<30)
3082#define EDP_PSR_LINK_STANDBY (1<<27) 3140#define EDP_PSR_LINK_STANDBY (1<<27)
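
Splitting EDP_PSR_BASE(dev) into HSW_EDP_PSR_BASE/BDW_EDP_PSR_BASE above moves the IS_HASWELL() test out of every register access; presumably the driver caches the result once at init, along the lines of this hedged sketch:

	/* done once during driver load, not on each MMIO access */
	dev_priv->psr_mmio_base = IS_HASWELL(dev) ?
				  HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
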
@@ -3099,14 +3157,10 @@ enum skl_disp_power_wells {
3099#define EDP_PSR_TP1_TIME_0us (3<<4) 3157#define EDP_PSR_TP1_TIME_0us (3<<4)
3100#define EDP_PSR_IDLE_FRAME_SHIFT 0 3158#define EDP_PSR_IDLE_FRAME_SHIFT 0
3101 3159
3102#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) 3160#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
3103#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) 3161#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
3104#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
3105#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
3106#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
3107#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
3108 3162
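With the five AUX data registers folded into the indexed EDP_PSR_AUX_DATA(i) above, callers can iterate instead of naming each register; a hedged sketch assuming the driver's I915_WRITE helper and a caller-provided data[] buffer (names illustrative only):

	int i;

	/* program all five 32-bit AUX data registers */
	for (i = 0; i < 5; i++)
		I915_WRITE(EDP_PSR_AUX_DATA(i), data[i]);
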
3109#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40) 3163#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40)
3110#define EDP_PSR_STATUS_STATE_MASK (7<<29) 3164#define EDP_PSR_STATUS_STATE_MASK (7<<29)
3111#define EDP_PSR_STATUS_STATE_IDLE (0<<29) 3165#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
3112#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) 3166#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -3130,15 +3184,15 @@ enum skl_disp_power_wells {
3130#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) 3184#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
3131#define EDP_PSR_STATUS_IDLE_MASK 0xf 3185#define EDP_PSR_STATUS_IDLE_MASK 0xf
3132 3186
3133#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44) 3187#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
3134#define EDP_PSR_PERF_CNT_MASK 0xffffff 3188#define EDP_PSR_PERF_CNT_MASK 0xffffff
3135 3189
3136#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60) 3190#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
3137#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) 3191#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
3138#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) 3192#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
3139#define EDP_PSR_DEBUG_MASK_HPD (1<<25) 3193#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
3140 3194
3141#define EDP_PSR2_CTL 0x6f900 3195#define EDP_PSR2_CTL _MMIO(0x6f900)
3142#define EDP_PSR2_ENABLE (1<<31) 3196#define EDP_PSR2_ENABLE (1<<31)
3143#define EDP_SU_TRACK_ENABLE (1<<30) 3197#define EDP_SU_TRACK_ENABLE (1<<30)
3144#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20) 3198#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
@@ -3153,9 +3207,9 @@ enum skl_disp_power_wells {
3153#define EDP_PSR2_IDLE_MASK 0xf 3207#define EDP_PSR2_IDLE_MASK 0xf
3154 3208
3155/* VGA port control */ 3209/* VGA port control */
3156#define ADPA 0x61100 3210#define ADPA _MMIO(0x61100)
3157#define PCH_ADPA 0xe1100 3211#define PCH_ADPA _MMIO(0xe1100)
3158#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) 3212#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
3159 3213
3160#define ADPA_DAC_ENABLE (1<<31) 3214#define ADPA_DAC_ENABLE (1<<31)
3161#define ADPA_DAC_DISABLE 0 3215#define ADPA_DAC_DISABLE 0
@@ -3201,7 +3255,7 @@ enum skl_disp_power_wells {
3201 3255
3202 3256
3203/* Hotplug control (945+ only) */ 3257/* Hotplug control (945+ only) */
3204#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) 3258#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
3205#define PORTB_HOTPLUG_INT_EN (1 << 29) 3259#define PORTB_HOTPLUG_INT_EN (1 << 29)
3206#define PORTC_HOTPLUG_INT_EN (1 << 28) 3260#define PORTC_HOTPLUG_INT_EN (1 << 28)
3207#define PORTD_HOTPLUG_INT_EN (1 << 27) 3261#define PORTD_HOTPLUG_INT_EN (1 << 27)
@@ -3231,7 +3285,7 @@ enum skl_disp_power_wells {
3231#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 3285#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
3232#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 3286#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
3233 3287
3234#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) 3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3235/* 3289/*
3236 * HDMI/DP bits are gen4+ 3290 * HDMI/DP bits are gen4+
3237 * 3291 *
@@ -3296,21 +3350,23 @@ enum skl_disp_power_wells {
3296 3350
3297/* SDVO and HDMI port control. 3351/* SDVO and HDMI port control.
3298 * The same register may be used for SDVO or HDMI */ 3352 * The same register may be used for SDVO or HDMI */
3299#define GEN3_SDVOB 0x61140 3353#define _GEN3_SDVOB 0x61140
3300#define GEN3_SDVOC 0x61160 3354#define _GEN3_SDVOC 0x61160
3355#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
3356#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
3301#define GEN4_HDMIB GEN3_SDVOB 3357#define GEN4_HDMIB GEN3_SDVOB
3302#define GEN4_HDMIC GEN3_SDVOC 3358#define GEN4_HDMIC GEN3_SDVOC
3303#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB) 3359#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
3304#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC) 3360#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
3305#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C) 3361#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
3306#define PCH_SDVOB 0xe1140 3362#define PCH_SDVOB _MMIO(0xe1140)
3307#define PCH_HDMIB PCH_SDVOB 3363#define PCH_HDMIB PCH_SDVOB
3308#define PCH_HDMIC 0xe1150 3364#define PCH_HDMIC _MMIO(0xe1150)
3309#define PCH_HDMID 0xe1160 3365#define PCH_HDMID _MMIO(0xe1160)
3310 3366
3311#define PORT_DFT_I9XX 0x61150 3367#define PORT_DFT_I9XX _MMIO(0x61150)
3312#define DC_BALANCE_RESET (1 << 25) 3368#define DC_BALANCE_RESET (1 << 25)
3313#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) 3369#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
3314#define DC_BALANCE_RESET_VLV (1 << 31) 3370#define DC_BALANCE_RESET_VLV (1 << 31)
3315#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) 3371#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
3316#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ 3372#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
@@ -3370,9 +3426,12 @@ enum skl_disp_power_wells {
3370 3426
3371 3427
3372/* DVO port control */ 3428/* DVO port control */
3373#define DVOA 0x61120 3429#define _DVOA 0x61120
3374#define DVOB 0x61140 3430#define DVOA _MMIO(_DVOA)
3375#define DVOC 0x61160 3431#define _DVOB 0x61140
3432#define DVOB _MMIO(_DVOB)
3433#define _DVOC 0x61160
3434#define DVOC _MMIO(_DVOC)
3376#define DVO_ENABLE (1 << 31) 3435#define DVO_ENABLE (1 << 31)
3377#define DVO_PIPE_B_SELECT (1 << 30) 3436#define DVO_PIPE_B_SELECT (1 << 30)
3378#define DVO_PIPE_STALL_UNUSED (0 << 28) 3437#define DVO_PIPE_STALL_UNUSED (0 << 28)
@@ -3397,14 +3456,14 @@ enum skl_disp_power_wells {
3397#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ 3456#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
3398#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ 3457#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
3399#define DVO_PRESERVE_MASK (0x7<<24) 3458#define DVO_PRESERVE_MASK (0x7<<24)
3400#define DVOA_SRCDIM 0x61124 3459#define DVOA_SRCDIM _MMIO(0x61124)
3401#define DVOB_SRCDIM 0x61144 3460#define DVOB_SRCDIM _MMIO(0x61144)
3402#define DVOC_SRCDIM 0x61164 3461#define DVOC_SRCDIM _MMIO(0x61164)
3403#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 3462#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
3404#define DVO_SRCDIM_VERTICAL_SHIFT 0 3463#define DVO_SRCDIM_VERTICAL_SHIFT 0
3405 3464
3406/* LVDS port control */ 3465/* LVDS port control */
3407#define LVDS 0x61180 3466#define LVDS _MMIO(0x61180)
3408/* 3467/*
3409 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as 3468 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
3410 * the DPLL semantics change when the LVDS is assigned to that pipe. 3469 * the DPLL semantics change when the LVDS is assigned to that pipe.
@@ -3454,13 +3513,13 @@ enum skl_disp_power_wells {
3454#define LVDS_B0B3_POWER_UP (3 << 2) 3513#define LVDS_B0B3_POWER_UP (3 << 2)
3455 3514
3456/* Video Data Island Packet control */ 3515/* Video Data Island Packet control */
3457#define VIDEO_DIP_DATA 0x61178 3516#define VIDEO_DIP_DATA _MMIO(0x61178)
3458/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC 3517/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
3459 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte 3518 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
3460 * of the infoframe structure specified by CEA-861. */ 3519 * of the infoframe structure specified by CEA-861. */
3461#define VIDEO_DIP_DATA_SIZE 32 3520#define VIDEO_DIP_DATA_SIZE 32
3462#define VIDEO_DIP_VSC_DATA_SIZE 36 3521#define VIDEO_DIP_VSC_DATA_SIZE 36
3463#define VIDEO_DIP_CTL 0x61170 3522#define VIDEO_DIP_CTL _MMIO(0x61170)
3464/* Pre HSW: */ 3523/* Pre HSW: */
3465#define VIDEO_DIP_ENABLE (1 << 31) 3524#define VIDEO_DIP_ENABLE (1 << 31)
3466#define VIDEO_DIP_PORT(port) ((port) << 29) 3525#define VIDEO_DIP_PORT(port) ((port) << 29)
@@ -3487,7 +3546,7 @@ enum skl_disp_power_wells {
3487#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) 3546#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
3488 3547
3489/* Panel power sequencing */ 3548/* Panel power sequencing */
3490#define PP_STATUS 0x61200 3549#define PP_STATUS _MMIO(0x61200)
3491#define PP_ON (1 << 31) 3550#define PP_ON (1 << 31)
3492/* 3551/*
3493 * Indicates that all dependencies of the panel are on: 3552 * Indicates that all dependencies of the panel are on:
@@ -3513,14 +3572,14 @@ enum skl_disp_power_wells {
3513#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) 3572#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
3514#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) 3573#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
3515#define PP_SEQUENCE_STATE_RESET (0xf << 0) 3574#define PP_SEQUENCE_STATE_RESET (0xf << 0)
3516#define PP_CONTROL 0x61204 3575#define PP_CONTROL _MMIO(0x61204)
3517#define POWER_TARGET_ON (1 << 0) 3576#define POWER_TARGET_ON (1 << 0)
3518#define PP_ON_DELAYS 0x61208 3577#define PP_ON_DELAYS _MMIO(0x61208)
3519#define PP_OFF_DELAYS 0x6120c 3578#define PP_OFF_DELAYS _MMIO(0x6120c)
3520#define PP_DIVISOR 0x61210 3579#define PP_DIVISOR _MMIO(0x61210)
3521 3580
3522/* Panel fitting */ 3581/* Panel fitting */
3523#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) 3582#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
3524#define PFIT_ENABLE (1 << 31) 3583#define PFIT_ENABLE (1 << 31)
3525#define PFIT_PIPE_MASK (3 << 29) 3584#define PFIT_PIPE_MASK (3 << 29)
3526#define PFIT_PIPE_SHIFT 29 3585#define PFIT_PIPE_SHIFT 29
@@ -3538,7 +3597,7 @@ enum skl_disp_power_wells {
3538#define PFIT_SCALING_PROGRAMMED (1 << 26) 3597#define PFIT_SCALING_PROGRAMMED (1 << 26)
3539#define PFIT_SCALING_PILLAR (2 << 26) 3598#define PFIT_SCALING_PILLAR (2 << 26)
3540#define PFIT_SCALING_LETTER (3 << 26) 3599#define PFIT_SCALING_LETTER (3 << 26)
3541#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) 3600#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
3542/* Pre-965 */ 3601/* Pre-965 */
3543#define PFIT_VERT_SCALE_SHIFT 20 3602#define PFIT_VERT_SCALE_SHIFT 20
3544#define PFIT_VERT_SCALE_MASK 0xfff00000 3603#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -3550,25 +3609,25 @@ enum skl_disp_power_wells {
3550#define PFIT_HORIZ_SCALE_SHIFT_965 0 3609#define PFIT_HORIZ_SCALE_SHIFT_965 0
3551#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff 3610#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
3552 3611
3553#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) 3612#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
3554 3613
3555#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) 3614#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
3556#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) 3615#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
3557#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ 3616#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
3558 _VLV_BLC_PWM_CTL2_B) 3617 _VLV_BLC_PWM_CTL2_B)
3559 3618
3560#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) 3619#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
3561#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) 3620#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
3562#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ 3621#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
3563 _VLV_BLC_PWM_CTL_B) 3622 _VLV_BLC_PWM_CTL_B)
3564 3623
3565#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) 3624#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
3566#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) 3625#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
3567#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ 3626#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
3568 _VLV_BLC_HIST_CTL_B) 3627 _VLV_BLC_HIST_CTL_B)
3569 3628
3570/* Backlight control */ 3629/* Backlight control */
3571#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ 3630#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
3572#define BLM_PWM_ENABLE (1 << 31) 3631#define BLM_PWM_ENABLE (1 << 31)
3573#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ 3632#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
3574#define BLM_PIPE_SELECT (1 << 29) 3633#define BLM_PIPE_SELECT (1 << 29)
@@ -3591,7 +3650,7 @@ enum skl_disp_power_wells {
3591#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) 3650#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
3592#define BLM_PHASE_IN_INCR_SHIFT (0) 3651#define BLM_PHASE_IN_INCR_SHIFT (0)
3593#define BLM_PHASE_IN_INCR_MASK (0xff << 0) 3652#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
3594#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) 3653#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
3595/* 3654/*
3596 * These are the most significant 15 bits of the number of backlight cycles in a 3655 * These are the most significant 15 bits of the number of backlight cycles in a
3597 * complete cycle of the modulated backlight control. 3656 * complete cycle of the modulated backlight control.
@@ -3613,25 +3672,25 @@ enum skl_disp_power_wells {
3613#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) 3672#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
3614#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ 3673#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
3615 3674
3616#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) 3675#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
3617#define BLM_HISTOGRAM_ENABLE (1 << 31) 3676#define BLM_HISTOGRAM_ENABLE (1 << 31)
3618 3677
3619/* New registers for PCH-split platforms. Safe where new bits show up, the 3678/* New registers for PCH-split platforms. Safe where new bits show up, the
3620 * register layout matches gen4 BLC_PWM_CTL[12]. */ 3679 * register layout matches gen4 BLC_PWM_CTL[12]. */
3621#define BLC_PWM_CPU_CTL2 0x48250 3680#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
3622#define BLC_PWM_CPU_CTL 0x48254 3681#define BLC_PWM_CPU_CTL _MMIO(0x48254)
3623 3682
3624#define HSW_BLC_PWM2_CTL 0x48350 3683#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
3625 3684
3626/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is 3685/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
3627 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ 3686 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
3628#define BLC_PWM_PCH_CTL1 0xc8250 3687#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
3629#define BLM_PCH_PWM_ENABLE (1 << 31) 3688#define BLM_PCH_PWM_ENABLE (1 << 31)
3630#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) 3689#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
3631#define BLM_PCH_POLARITY (1 << 29) 3690#define BLM_PCH_POLARITY (1 << 29)
3632#define BLC_PWM_PCH_CTL2 0xc8254 3691#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
3633 3692
3634#define UTIL_PIN_CTL 0x48400 3693#define UTIL_PIN_CTL _MMIO(0x48400)
3635#define UTIL_PIN_ENABLE (1 << 31) 3694#define UTIL_PIN_ENABLE (1 << 31)
3636 3695
3637#define UTIL_PIN_PIPE(x) ((x) << 29) 3696#define UTIL_PIN_PIPE(x) ((x) << 29)
@@ -3651,18 +3710,18 @@ enum skl_disp_power_wells {
3651#define _BXT_BLC_PWM_FREQ2 0xC8354 3710#define _BXT_BLC_PWM_FREQ2 0xC8354
3652#define _BXT_BLC_PWM_DUTY2 0xC8358 3711#define _BXT_BLC_PWM_DUTY2 0xC8358
3653 3712
3654#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \ 3713#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
3655 _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) 3714 _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
3656#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \ 3715#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
3657 _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) 3716 _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
3658#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \ 3717#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
3659 _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) 3718 _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
3660 3719
3661#define PCH_GTC_CTL 0xe7000 3720#define PCH_GTC_CTL _MMIO(0xe7000)
3662#define PCH_GTC_ENABLE (1 << 31) 3721#define PCH_GTC_ENABLE (1 << 31)
3663 3722
3664/* TV port control */ 3723/* TV port control */
3665#define TV_CTL 0x68000 3724#define TV_CTL _MMIO(0x68000)
3666/* Enables the TV encoder */ 3725/* Enables the TV encoder */
3667# define TV_ENC_ENABLE (1 << 31) 3726# define TV_ENC_ENABLE (1 << 31)
3668/* Sources the TV encoder input from pipe B instead of A. */ 3727/* Sources the TV encoder input from pipe B instead of A. */
@@ -3729,7 +3788,7 @@ enum skl_disp_power_wells {
 # define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
 # define TV_TEST_MODE_MASK (7 << 0)

-#define TV_DAC 0x68004
+#define TV_DAC _MMIO(0x68004)
 # define TV_DAC_SAVE 0x00ffff00
 /*
  * Reports that DAC state change logic has reported change (RO).
@@ -3780,13 +3839,13 @@ enum skl_disp_power_wells {
  * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
  * -1 (0x3) being the only legal negative value.
  */
-#define TV_CSC_Y 0x68010
+#define TV_CSC_Y _MMIO(0x68010)
 # define TV_RY_MASK 0x07ff0000
 # define TV_RY_SHIFT 16
 # define TV_GY_MASK 0x00000fff
 # define TV_GY_SHIFT 0

-#define TV_CSC_Y2 0x68014
+#define TV_CSC_Y2 _MMIO(0x68014)
 # define TV_BY_MASK 0x07ff0000
 # define TV_BY_SHIFT 16
 /*
@@ -3797,13 +3856,13 @@ enum skl_disp_power_wells {
 # define TV_AY_MASK 0x000003ff
 # define TV_AY_SHIFT 0

-#define TV_CSC_U 0x68018
+#define TV_CSC_U _MMIO(0x68018)
 # define TV_RU_MASK 0x07ff0000
 # define TV_RU_SHIFT 16
 # define TV_GU_MASK 0x000007ff
 # define TV_GU_SHIFT 0

-#define TV_CSC_U2 0x6801c
+#define TV_CSC_U2 _MMIO(0x6801c)
 # define TV_BU_MASK 0x07ff0000
 # define TV_BU_SHIFT 16
 /*
@@ -3814,13 +3873,13 @@ enum skl_disp_power_wells {
 # define TV_AU_MASK 0x000003ff
 # define TV_AU_SHIFT 0

-#define TV_CSC_V 0x68020
+#define TV_CSC_V _MMIO(0x68020)
 # define TV_RV_MASK 0x0fff0000
 # define TV_RV_SHIFT 16
 # define TV_GV_MASK 0x000007ff
 # define TV_GV_SHIFT 0

-#define TV_CSC_V2 0x68024
+#define TV_CSC_V2 _MMIO(0x68024)
 # define TV_BV_MASK 0x07ff0000
 # define TV_BV_SHIFT 16
 /*
@@ -3831,7 +3890,7 @@ enum skl_disp_power_wells {
 # define TV_AV_MASK 0x000007ff
 # define TV_AV_SHIFT 0

-#define TV_CLR_KNOBS 0x68028
+#define TV_CLR_KNOBS _MMIO(0x68028)
 /* 2s-complement brightness adjustment */
 # define TV_BRIGHTNESS_MASK 0xff000000
 # define TV_BRIGHTNESS_SHIFT 24
@@ -3845,7 +3904,7 @@ enum skl_disp_power_wells {
 # define TV_HUE_MASK 0x000000ff
 # define TV_HUE_SHIFT 0

-#define TV_CLR_LEVEL 0x6802c
+#define TV_CLR_LEVEL _MMIO(0x6802c)
 /* Controls the DAC level for black */
 # define TV_BLACK_LEVEL_MASK 0x01ff0000
 # define TV_BLACK_LEVEL_SHIFT 16
@@ -3853,7 +3912,7 @@ enum skl_disp_power_wells {
 # define TV_BLANK_LEVEL_MASK 0x000001ff
 # define TV_BLANK_LEVEL_SHIFT 0

-#define TV_H_CTL_1 0x68030
+#define TV_H_CTL_1 _MMIO(0x68030)
 /* Number of pixels in the hsync. */
 # define TV_HSYNC_END_MASK 0x1fff0000
 # define TV_HSYNC_END_SHIFT 16
@@ -3861,7 +3920,7 @@ enum skl_disp_power_wells {
 # define TV_HTOTAL_MASK 0x00001fff
 # define TV_HTOTAL_SHIFT 0

-#define TV_H_CTL_2 0x68034
+#define TV_H_CTL_2 _MMIO(0x68034)
 /* Enables the colorburst (needed for non-component color) */
 # define TV_BURST_ENA (1 << 31)
 /* Offset of the colorburst from the start of hsync, in pixels minus one. */
@@ -3871,7 +3930,7 @@ enum skl_disp_power_wells {
 # define TV_HBURST_LEN_SHIFT 0
 # define TV_HBURST_LEN_MASK 0x0001fff

-#define TV_H_CTL_3 0x68038
+#define TV_H_CTL_3 _MMIO(0x68038)
 /* End of hblank, measured in pixels minus one from start of hsync */
 # define TV_HBLANK_END_SHIFT 16
 # define TV_HBLANK_END_MASK 0x1fff0000
@@ -3879,7 +3938,7 @@ enum skl_disp_power_wells {
 # define TV_HBLANK_START_SHIFT 0
 # define TV_HBLANK_START_MASK 0x0001fff

-#define TV_V_CTL_1 0x6803c
+#define TV_V_CTL_1 _MMIO(0x6803c)
 /* XXX */
 # define TV_NBR_END_SHIFT 16
 # define TV_NBR_END_MASK 0x07ff0000
@@ -3890,7 +3949,7 @@ enum skl_disp_power_wells {
 # define TV_VI_END_F2_SHIFT 0
 # define TV_VI_END_F2_MASK 0x0000003f

-#define TV_V_CTL_2 0x68040
+#define TV_V_CTL_2 _MMIO(0x68040)
 /* Length of vsync, in half lines */
 # define TV_VSYNC_LEN_MASK 0x07ff0000
 # define TV_VSYNC_LEN_SHIFT 16
@@ -3906,7 +3965,7 @@ enum skl_disp_power_wells {
 # define TV_VSYNC_START_F2_MASK 0x0000007f
 # define TV_VSYNC_START_F2_SHIFT 0

-#define TV_V_CTL_3 0x68044
+#define TV_V_CTL_3 _MMIO(0x68044)
 /* Enables generation of the equalization signal */
 # define TV_EQUAL_ENA (1 << 31)
 /* Length of vsync, in half lines */
@@ -3924,7 +3983,7 @@ enum skl_disp_power_wells {
 # define TV_VEQ_START_F2_MASK 0x000007f
 # define TV_VEQ_START_F2_SHIFT 0

-#define TV_V_CTL_4 0x68048
+#define TV_V_CTL_4 _MMIO(0x68048)
 /*
  * Offset to start of vertical colorburst, measured in one less than the
  * number of lines from vertical start.
@@ -3938,7 +3997,7 @@ enum skl_disp_power_wells {
 # define TV_VBURST_END_F1_MASK 0x000000ff
 # define TV_VBURST_END_F1_SHIFT 0

-#define TV_V_CTL_5 0x6804c
+#define TV_V_CTL_5 _MMIO(0x6804c)
 /*
  * Offset to start of vertical colorburst, measured in one less than the
  * number of lines from vertical start.
@@ -3952,7 +4011,7 @@ enum skl_disp_power_wells {
 # define TV_VBURST_END_F2_MASK 0x000000ff
 # define TV_VBURST_END_F2_SHIFT 0

-#define TV_V_CTL_6 0x68050
+#define TV_V_CTL_6 _MMIO(0x68050)
 /*
  * Offset to start of vertical colorburst, measured in one less than the
  * number of lines from vertical start.
@@ -3966,7 +4025,7 @@ enum skl_disp_power_wells {
 # define TV_VBURST_END_F3_MASK 0x000000ff
 # define TV_VBURST_END_F3_SHIFT 0

-#define TV_V_CTL_7 0x68054
+#define TV_V_CTL_7 _MMIO(0x68054)
 /*
  * Offset to start of vertical colorburst, measured in one less than the
  * number of lines from vertical start.
@@ -3980,7 +4039,7 @@ enum skl_disp_power_wells {
 # define TV_VBURST_END_F4_MASK 0x000000ff
 # define TV_VBURST_END_F4_SHIFT 0

-#define TV_SC_CTL_1 0x68060
+#define TV_SC_CTL_1 _MMIO(0x68060)
 /* Turns on the first subcarrier phase generation DDA */
 # define TV_SC_DDA1_EN (1 << 31)
 /* Turns on the first subcarrier phase generation DDA */
@@ -4002,7 +4061,7 @@ enum skl_disp_power_wells {
 # define TV_SCDDA1_INC_MASK 0x00000fff
 # define TV_SCDDA1_INC_SHIFT 0

-#define TV_SC_CTL_2 0x68064
+#define TV_SC_CTL_2 _MMIO(0x68064)
 /* Sets the rollover for the second subcarrier phase generation DDA */
 # define TV_SCDDA2_SIZE_MASK 0x7fff0000
 # define TV_SCDDA2_SIZE_SHIFT 16
@@ -4010,7 +4069,7 @@ enum skl_disp_power_wells {
 # define TV_SCDDA2_INC_MASK 0x00007fff
 # define TV_SCDDA2_INC_SHIFT 0

-#define TV_SC_CTL_3 0x68068
+#define TV_SC_CTL_3 _MMIO(0x68068)
 /* Sets the rollover for the third subcarrier phase generation DDA */
 # define TV_SCDDA3_SIZE_MASK 0x7fff0000
 # define TV_SCDDA3_SIZE_SHIFT 16
@@ -4018,7 +4077,7 @@ enum skl_disp_power_wells {
 # define TV_SCDDA3_INC_MASK 0x00007fff
 # define TV_SCDDA3_INC_SHIFT 0

-#define TV_WIN_POS 0x68070
+#define TV_WIN_POS _MMIO(0x68070)
 /* X coordinate of the display from the start of horizontal active */
 # define TV_XPOS_MASK 0x1fff0000
 # define TV_XPOS_SHIFT 16
@@ -4026,7 +4085,7 @@ enum skl_disp_power_wells {
 # define TV_YPOS_MASK 0x00000fff
 # define TV_YPOS_SHIFT 0

-#define TV_WIN_SIZE 0x68074
+#define TV_WIN_SIZE _MMIO(0x68074)
 /* Horizontal size of the display window, measured in pixels*/
 # define TV_XSIZE_MASK 0x1fff0000
 # define TV_XSIZE_SHIFT 16
@@ -4038,7 +4097,7 @@ enum skl_disp_power_wells {
 # define TV_YSIZE_MASK 0x00000fff
 # define TV_YSIZE_SHIFT 0

-#define TV_FILTER_CTL_1 0x68080
+#define TV_FILTER_CTL_1 _MMIO(0x68080)
 /*
  * Enables automatic scaling calculation.
  *
@@ -4071,7 +4130,7 @@ enum skl_disp_power_wells {
 # define TV_HSCALE_FRAC_MASK 0x00003fff
 # define TV_HSCALE_FRAC_SHIFT 0

-#define TV_FILTER_CTL_2 0x68084
+#define TV_FILTER_CTL_2 _MMIO(0x68084)
 /*
  * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
  *
@@ -4087,7 +4146,7 @@ enum skl_disp_power_wells {
 # define TV_VSCALE_FRAC_MASK 0x00007fff
 # define TV_VSCALE_FRAC_SHIFT 0

-#define TV_FILTER_CTL_3 0x68088
+#define TV_FILTER_CTL_3 _MMIO(0x68088)
 /*
  * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
  *
@@ -4107,7 +4166,7 @@ enum skl_disp_power_wells {
 # define TV_VSCALE_IP_FRAC_MASK 0x00007fff
 # define TV_VSCALE_IP_FRAC_SHIFT 0

-#define TV_CC_CONTROL 0x68090
+#define TV_CC_CONTROL _MMIO(0x68090)
 # define TV_CC_ENABLE (1 << 31)
 /*
  * Specifies which field to send the CC data in.
@@ -4123,7 +4182,7 @@ enum skl_disp_power_wells {
 # define TV_CC_LINE_MASK 0x0000003f
 # define TV_CC_LINE_SHIFT 0

-#define TV_CC_DATA 0x68094
+#define TV_CC_DATA _MMIO(0x68094)
 # define TV_CC_RDY (1 << 31)
 /* Second word of CC data to be transmitted. */
 # define TV_CC_DATA_2_MASK 0x007f0000
@@ -4132,20 +4191,20 @@ enum skl_disp_power_wells {
 # define TV_CC_DATA_1_MASK 0x0000007f
 # define TV_CC_DATA_1_SHIFT 0

-#define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */
-#define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */
-#define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */
-#define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */
+#define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */
+#define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */
+#define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */
+#define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */

 /* Display Port */
-#define DP_A 0x64000 /* eDP */
-#define DP_B 0x64100
-#define DP_C 0x64200
-#define DP_D 0x64300
+#define DP_A _MMIO(0x64000) /* eDP */
+#define DP_B _MMIO(0x64100)
+#define DP_C _MMIO(0x64200)
+#define DP_D _MMIO(0x64300)

-#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B)
-#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C)
-#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D)
+#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
+#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
+#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)

 #define DP_PORT_EN (1 << 31)
 #define DP_PIPEB_SELECT (1 << 30)
@@ -4199,7 +4258,7 @@ enum skl_disp_power_wells {

 /* eDP */
 #define DP_PLL_FREQ_270MHZ (0 << 16)
-#define DP_PLL_FREQ_160MHZ (1 << 16)
+#define DP_PLL_FREQ_162MHZ (1 << 16)
 #define DP_PLL_FREQ_MASK (3 << 16)

 /* locked once port is enabled */
@@ -4232,33 +4291,36 @@ enum skl_disp_power_wells {
  * is 20 bytes in each direction, hence the 5 fixed
  * data registers
  */
-#define DPA_AUX_CH_CTL 0x64010
-#define DPA_AUX_CH_DATA1 0x64014
-#define DPA_AUX_CH_DATA2 0x64018
-#define DPA_AUX_CH_DATA3 0x6401c
-#define DPA_AUX_CH_DATA4 0x64020
-#define DPA_AUX_CH_DATA5 0x64024
+#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010)
+#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014)
+#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018)
+#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c)
+#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020)
+#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024)

-#define DPB_AUX_CH_CTL 0x64110
-#define DPB_AUX_CH_DATA1 0x64114
-#define DPB_AUX_CH_DATA2 0x64118
-#define DPB_AUX_CH_DATA3 0x6411c
-#define DPB_AUX_CH_DATA4 0x64120
-#define DPB_AUX_CH_DATA5 0x64124
+#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110)
+#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114)
+#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118)
+#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c)
+#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120)
+#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124)

-#define DPC_AUX_CH_CTL 0x64210
-#define DPC_AUX_CH_DATA1 0x64214
-#define DPC_AUX_CH_DATA2 0x64218
-#define DPC_AUX_CH_DATA3 0x6421c
-#define DPC_AUX_CH_DATA4 0x64220
-#define DPC_AUX_CH_DATA5 0x64224
+#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210)
+#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214)
+#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218)
+#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c)
+#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220)
+#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224)

-#define DPD_AUX_CH_CTL 0x64310
-#define DPD_AUX_CH_DATA1 0x64314
-#define DPD_AUX_CH_DATA2 0x64318
-#define DPD_AUX_CH_DATA3 0x6431c
-#define DPD_AUX_CH_DATA4 0x64320
-#define DPD_AUX_CH_DATA5 0x64324
+#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310)
+#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314)
+#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318)
+#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c)
+#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
+#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
+
+#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
+#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */

 #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
 #define DP_AUX_CH_CTL_DONE (1 << 30)
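[Note: collapsing the four fixed AUX blocks into two port-indexed macros works because the blocks sit 0x100 apart. A sketch of the arithmetic, with _PORT assumed to interpolate like _PIPE (hypothetical helper shape, display_mmio_offset omitted for brevity):

    #define _PORT(port, a, b) ((a) + (port) * ((b) - (a)))

    /* Port C (port == 2), data word 3:
     *   _PORT(2, 0x64014, 0x64114) = 0x64014 + 2 * 0x100 = 0x64214
     *   DP_AUX_CH_DATA(2, 3)       = _MMIO(0x64214 + 3 * 4) = _MMIO(0x64220)
     * which is exactly the old _DPC_AUX_CH_DATA4 slot. */
]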
@@ -4335,10 +4397,10 @@ enum skl_disp_power_wells {
 #define _PIPEB_LINK_N_G4X 0x71064
 #define PIPEA_DP_LINK_N_MASK (0xffffff)

-#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
-#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
-#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
-#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
+#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)

 /* Display & cursor control */

@@ -4454,15 +4516,15 @@ enum skl_disp_power_wells {
  */
 #define PIPE_EDP_OFFSET 0x7f000

-#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
+#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
 	dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
 	dev_priv->info.display_mmio_offset)

-#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
-#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
-#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
-#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
+#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
+#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)

 #define _PIPE_MISC_A 0x70030
 #define _PIPE_MISC_B 0x71030
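[Note: _MMIO_PIPE2 rebases a register given at its pipe-A address by however far the target pipe's block sits from pipe A's, then adds the platform's display MMIO offset. Worked symbolically, with pipe_offsets[] = {0x70000, 0x71000} as illustrative values, not values from this file:

    /* PIPECONF(PIPE_B)
     *   = _MMIO(pipe_offsets[PIPE_B] - pipe_offsets[PIPE_A]
     *           + _PIPEACONF + display_mmio_offset)
     *   = _MMIO(0x71000 - 0x70000 + _PIPEACONF + display_mmio_offset)
     * i.e. the pipe-A offset shifted up by the inter-pipe stride. */
]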
@@ -4474,9 +4536,9 @@ enum skl_disp_power_wells {
 #define PIPEMISC_DITHER_ENABLE (1<<4)
 #define PIPEMISC_DITHER_TYPE_MASK (3<<2)
 #define PIPEMISC_DITHER_TYPE_SP (0<<2)
-#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
+#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)

-#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
+#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
 #define PIPEB_LINE_COMPARE_INT_EN (1<<29)
 #define PIPEB_HLINE_INT_EN (1<<28)
 #define PIPEB_VBLANK_INT_EN (1<<27)
@@ -4497,7 +4559,7 @@ enum skl_disp_power_wells {
 #define SPRITEE_FLIPDONE_INT_EN (1<<9)
 #define PLANEC_FLIPDONE_INT_EN (1<<8)

-#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
 #define SPRITEF_INVALID_GTT_INT_EN (1<<27)
 #define SPRITEE_INVALID_GTT_INT_EN (1<<26)
 #define PLANEC_INVALID_GTT_INT_EN (1<<25)
@@ -4527,7 +4589,7 @@ enum skl_disp_power_wells {
 #define DPINVGTT_STATUS_MASK 0xff
 #define DPINVGTT_STATUS_MASK_CHV 0xfff

-#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
 #define DSPARB_CSTART_MASK (0x7f << 7)
 #define DSPARB_CSTART_SHIFT 7
 #define DSPARB_BSTART_MASK (0x7f)
@@ -4542,7 +4604,7 @@ enum skl_disp_power_wells {
 #define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
 #define DSPARB_SPRITED_SHIFT_VLV 24
 #define DSPARB_SPRITED_MASK_VLV (0xff << 24)
-#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
 #define DSPARB_SPRITEA_HI_SHIFT_VLV 0
 #define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
 #define DSPARB_SPRITEB_HI_SHIFT_VLV 4
@@ -4555,14 +4617,14 @@ enum skl_disp_power_wells {
 #define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
 #define DSPARB_SPRITEF_HI_SHIFT_VLV 20
 #define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
-#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
 #define DSPARB_SPRITEE_SHIFT_VLV 0
 #define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
 #define DSPARB_SPRITEF_SHIFT_VLV 8
 #define DSPARB_SPRITEF_MASK_VLV (0xff << 8)

 /* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
 #define DSPFW_SR_SHIFT 23
 #define DSPFW_SR_MASK (0x1ff<<23)
 #define DSPFW_CURSORB_SHIFT 16
@@ -4573,7 +4635,7 @@ enum skl_disp_power_wells {
 #define DSPFW_PLANEA_SHIFT 0
 #define DSPFW_PLANEA_MASK (0x7f<<0)
 #define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
-#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
 #define DSPFW_FBC_SR_EN (1<<31) /* g4x */
 #define DSPFW_FBC_SR_SHIFT 28
 #define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
@@ -4589,7 +4651,7 @@ enum skl_disp_power_wells {
 #define DSPFW_SPRITEA_SHIFT 0
 #define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
 #define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
-#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
 #define DSPFW_HPLL_SR_EN (1<<31)
 #define PINEVIEW_SELF_REFRESH_EN (1<<30)
 #define DSPFW_CURSOR_SR_SHIFT 24
@@ -4600,14 +4662,14 @@ enum skl_disp_power_wells {
 #define DSPFW_HPLL_SR_MASK (0x1ff<<0)

 /* vlv/chv */
-#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070)
+#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
 #define DSPFW_SPRITEB_WM1_SHIFT 16
 #define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
 #define DSPFW_CURSORA_WM1_SHIFT 8
 #define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
 #define DSPFW_SPRITEA_WM1_SHIFT 0
 #define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
-#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074)
+#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
 #define DSPFW_PLANEB_WM1_SHIFT 24
 #define DSPFW_PLANEB_WM1_MASK (0xff<<24)
 #define DSPFW_PLANEA_WM1_SHIFT 16
@@ -4616,11 +4678,11 @@ enum skl_disp_power_wells {
 #define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
 #define DSPFW_CURSOR_SR_WM1_SHIFT 0
 #define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
-#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078)
+#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
 #define DSPFW_SR_WM1_SHIFT 0
 #define DSPFW_SR_WM1_MASK (0x1ff<<0)
-#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c)
-#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
 #define DSPFW_SPRITED_WM1_SHIFT 24
 #define DSPFW_SPRITED_WM1_MASK (0xff<<24)
 #define DSPFW_SPRITED_SHIFT 16
@@ -4629,7 +4691,7 @@ enum skl_disp_power_wells {
 #define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
 #define DSPFW_SPRITEC_SHIFT 0
 #define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
-#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
+#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
 #define DSPFW_SPRITEF_WM1_SHIFT 24
 #define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
 #define DSPFW_SPRITEF_SHIFT 16
@@ -4638,7 +4700,7 @@ enum skl_disp_power_wells {
 #define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
 #define DSPFW_SPRITEE_SHIFT 0
 #define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
-#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
 #define DSPFW_PLANEC_WM1_SHIFT 24
 #define DSPFW_PLANEC_WM1_MASK (0xff<<24)
 #define DSPFW_PLANEC_SHIFT 16
@@ -4649,7 +4711,7 @@ enum skl_disp_power_wells {
 #define DSPFW_CURSORC_MASK (0x3f<<0)

 /* vlv/chv high order bits */
-#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
+#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
 #define DSPFW_SR_HI_SHIFT 24
 #define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
 #define DSPFW_SPRITEF_HI_SHIFT 23
@@ -4670,7 +4732,7 @@ enum skl_disp_power_wells {
 #define DSPFW_SPRITEA_HI_MASK (1<<4)
 #define DSPFW_PLANEA_HI_SHIFT 0
 #define DSPFW_PLANEA_HI_MASK (1<<0)
-#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
+#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
 #define DSPFW_SR_WM1_HI_SHIFT 24
 #define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
 #define DSPFW_SPRITEF_WM1_HI_SHIFT 23
@@ -4693,7 +4755,7 @@ enum skl_disp_power_wells {
 #define DSPFW_PLANEA_WM1_HI_MASK (1<<0)

 /* drain latency register values*/
-#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
 #define DDL_CURSOR_SHIFT 24
 #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
 #define DDL_PLANE_SHIFT 0
@@ -4701,7 +4763,7 @@ enum skl_disp_power_wells {
 #define DDL_PRECISION_LOW (0<<7)
 #define DRAIN_LATENCY_MASK 0x7f

-#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
+#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
 #define CBR_PND_DEADLINE_DISABLE (1<<31)
 #define CBR_PWM_CLOCK_MUX_SELECT (1<<30)

@@ -4739,51 +4801,51 @@ enum skl_disp_power_wells {
 #define I965_CURSOR_DFT_WM 8

 /* Watermark register definitions for SKL */
-#define CUR_WM_A_0 0x70140
-#define CUR_WM_B_0 0x71140
-#define PLANE_WM_1_A_0 0x70240
-#define PLANE_WM_1_B_0 0x71240
-#define PLANE_WM_2_A_0 0x70340
-#define PLANE_WM_2_B_0 0x71340
-#define PLANE_WM_TRANS_1_A_0 0x70268
-#define PLANE_WM_TRANS_1_B_0 0x71268
-#define PLANE_WM_TRANS_2_A_0 0x70368
-#define PLANE_WM_TRANS_2_B_0 0x71368
-#define CUR_WM_TRANS_A_0 0x70168
-#define CUR_WM_TRANS_B_0 0x71168
+#define _CUR_WM_A_0 0x70140
+#define _CUR_WM_B_0 0x71140
+#define _PLANE_WM_1_A_0 0x70240
+#define _PLANE_WM_1_B_0 0x71240
+#define _PLANE_WM_2_A_0 0x70340
+#define _PLANE_WM_2_B_0 0x71340
+#define _PLANE_WM_TRANS_1_A_0 0x70268
+#define _PLANE_WM_TRANS_1_B_0 0x71268
+#define _PLANE_WM_TRANS_2_A_0 0x70368
+#define _PLANE_WM_TRANS_2_B_0 0x71368
+#define _CUR_WM_TRANS_A_0 0x70168
+#define _CUR_WM_TRANS_B_0 0x71168
 #define PLANE_WM_EN (1 << 31)
 #define PLANE_WM_LINES_SHIFT 14
 #define PLANE_WM_LINES_MASK 0x1f
 #define PLANE_WM_BLOCKS_MASK 0x3ff

-#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
-#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
-#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
+#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
+#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A_0, _CUR_WM_TRANS_B_0)

-#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
-#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
+#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
 #define _PLANE_WM_BASE(pipe, plane) \
 	_PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
 #define PLANE_WM(pipe, plane, level) \
-	(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+	_MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
 #define _PLANE_WM_TRANS_1(pipe) \
-	_PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
+	_PIPE(pipe, _PLANE_WM_TRANS_1_A_0, _PLANE_WM_TRANS_1_B_0)
 #define _PLANE_WM_TRANS_2(pipe) \
-	_PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
+	_PIPE(pipe, _PLANE_WM_TRANS_2_A_0, _PLANE_WM_TRANS_2_B_0)
 #define PLANE_WM_TRANS(pipe, plane) \
-	_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
+	_MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))

 /* define the Watermark register on Ironlake */
-#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPEA_ILK _MMIO(0x45100)
 #define WM0_PIPE_PLANE_MASK (0xffff<<16)
 #define WM0_PIPE_PLANE_SHIFT 16
 #define WM0_PIPE_SPRITE_MASK (0xff<<8)
 #define WM0_PIPE_SPRITE_SHIFT 8
 #define WM0_PIPE_CURSOR_MASK (0xff)

-#define WM0_PIPEB_ILK 0x45104
-#define WM0_PIPEC_IVB 0x45200
-#define WM1_LP_ILK 0x45108
+#define WM0_PIPEB_ILK _MMIO(0x45104)
+#define WM0_PIPEC_IVB _MMIO(0x45200)
+#define WM1_LP_ILK _MMIO(0x45108)
 #define WM1_LP_SR_EN (1<<31)
 #define WM1_LP_LATENCY_SHIFT 24
 #define WM1_LP_LATENCY_MASK (0x7f<<24)
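[Note: the SKL watermark macros bury two selections and a per-level stride in one expression; prefixing the raw offsets with an underscore leaves only the _MMIO-wrapped forms as the public names. Illustrative arithmetic using values from the hunk above, with _PIPE assumed to interpolate as before:

    /* CUR_WM(PIPE_B, 2): cursor watermark, level 2, 4 bytes per level.
     *   _CUR_WM_0(PIPE_B) = _PIPE(1, 0x70140, 0x71140) = 0x71140
     *   CUR_WM(PIPE_B, 2) = _MMIO(0x71140 + 4 * 2)     = _MMIO(0x71148) */
]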
@@ -4793,13 +4855,13 @@ enum skl_disp_power_wells {
 #define WM1_LP_SR_MASK (0x7ff<<8)
 #define WM1_LP_SR_SHIFT 8
 #define WM1_LP_CURSOR_MASK (0xff)
-#define WM2_LP_ILK 0x4510c
+#define WM2_LP_ILK _MMIO(0x4510c)
 #define WM2_LP_EN (1<<31)
-#define WM3_LP_ILK 0x45110
+#define WM3_LP_ILK _MMIO(0x45110)
 #define WM3_LP_EN (1<<31)
-#define WM1S_LP_ILK 0x45120
-#define WM2S_LP_IVB 0x45124
-#define WM3S_LP_IVB 0x45128
+#define WM1S_LP_ILK _MMIO(0x45120)
+#define WM2S_LP_IVB _MMIO(0x45124)
+#define WM3S_LP_IVB _MMIO(0x45128)
 #define WM1S_LP_EN (1<<31)

 #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
@@ -4807,7 +4869,7 @@ enum skl_disp_power_wells {
 	((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))

 /* Memory latency timer register */
-#define MLTR_ILK 0x11222
+#define MLTR_ILK _MMIO(0x11222)
 #define MLTR_WM1_SHIFT 0
 #define MLTR_WM2_SHIFT 8
 /* the unit of memory self-refresh latency time is 0.5us */
@@ -4815,7 +4877,7 @@ enum skl_disp_power_wells {


 /* the address where we get all kinds of latency value */
-#define SSKPD 0x5d10
+#define SSKPD _MMIO(0x5d10)
 #define SSKPD_WM_MASK 0x3f
 #define SSKPD_WM0_SHIFT 0
 #define SSKPD_WM1_SHIFT 8
@@ -4848,8 +4910,8 @@ enum skl_disp_power_wells {
 /* GM45+ just has to be different */
 #define _PIPEA_FRMCOUNT_G4X 0x70040
 #define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
-#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
+#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
+#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)

 /* Cursor A & B regs */
 #define _CURACNTR 0x70080
@@ -4887,7 +4949,7 @@ enum skl_disp_power_wells {
 #define CURSOR_POS_SIGN 0x8000
 #define CURSOR_X_SHIFT 0
 #define CURSOR_Y_SHIFT 16
-#define CURSIZE 0x700a0
+#define CURSIZE _MMIO(0x700a0)
 #define _CURBCNTR 0x700c0
 #define _CURBBASE 0x700c4
 #define _CURBPOS 0x700c8
@@ -4896,7 +4958,7 @@ enum skl_disp_power_wells {
 #define _CURBBASE_IVB 0x71084
 #define _CURBPOS_IVB 0x71088

-#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
+#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
 	dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
 	dev_priv->info.display_mmio_offset)

@@ -4957,16 +5019,16 @@ enum skl_disp_power_wells {
 #define _DSPAOFFSET 0x701A4 /* HSW */
 #define _DSPASURFLIVE 0x701AC

-#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
-#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
-#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
-#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
-#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
-#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
-#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
+#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
+#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
+#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS)
+#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
+#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
+#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
 #define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
-#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
+#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)

 /* CHV pipe B blender and primary plane */
 #define _CHV_BLEND_A 0x60a00
@@ -4980,11 +5042,11 @@ enum skl_disp_power_wells {
 #define _PRIMCNSTALPHA_A 0x60a10
 #define PRIM_CONST_ALPHA_ENABLE (1<<31)

-#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
-#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
-#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
-#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
-#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
+#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A)

 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK (0xfffff000)
@@ -5002,9 +5064,10 @@ enum skl_disp_power_wells {
  * [10:1f] all
  * [30:32] all
  */
-#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
+#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
+#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)

 /* Pipe B */
 #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
@@ -5086,18 +5149,18 @@ enum skl_disp_power_wells {
 #define _DVSBSCALE 0x73204
 #define _DVSBGAMC 0x73300

-#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
-#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
-#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
-#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
-#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
-#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
-#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
-#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
-#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
-#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
-#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
-#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)

 #define _SPRA_CTL 0x70280
 #define SPRITE_ENABLE (1<<31)
@@ -5160,20 +5223,20 @@ enum skl_disp_power_wells {
 #define _SPRB_SCALE 0x71304
 #define _SPRB_GAMC 0x71400

-#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
-#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
-#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
-#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
-#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
-#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
-#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
-#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
-#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
-#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
-#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
-#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
-#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)

 #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
 #define SP_ENABLE (1<<31)
@@ -5223,18 +5286,18 @@ enum skl_disp_power_wells {
 #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
 #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)

-#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
-#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
-#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
-#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
-#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
-#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
-#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
-#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
-#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
-#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
-#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
-#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
+#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)

 /*
  * CHV pipe B sprite CSC
@@ -5243,29 +5306,29 @@ enum skl_disp_power_wells {
  * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
  * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
  */
-#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
-#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
-#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
+#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
+#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
 #define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
 #define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */

-#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
-#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
-#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
-#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
-#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
+#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
+#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
+#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
+#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
 #define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
 #define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */

-#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
-#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
-#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
+#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
+#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
 #define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
 #define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */

-#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
-#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
-#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
+#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
+#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
 #define SPCSC_OMAX(x) ((x) << 16) /* u10 */
 #define SPCSC_OMIN(x) ((x) << 0) /* u10 */

@@ -5346,7 +5409,7 @@ enum skl_disp_power_wells {
 #define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
 #define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
 #define PLANE_CTL(pipe, plane) \
-	_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))

 #define _PLANE_STRIDE_1_B 0x71188
 #define _PLANE_STRIDE_2_B 0x71288
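[Note: PLANE_CTL(pipe, plane) stacks two selections before the wrap: _PLANE picks the per-plane variant, each variant having already picked its pipe. Symbolically, with the helper shapes assumed as above:

    /* PLANE_CTL(PIPE_B, 0)
     *   = _MMIO_PLANE(0, _PLANE_CTL_1(PIPE_B), _PLANE_CTL_2(PIPE_B))
     *   = _MMIO(_PIPE(PIPE_B, _PLANE_CTL_1_A, _PLANE_CTL_1_B))
     *   = _MMIO(_PLANE_CTL_1_B)
     * plane 0 picks variant 1, and pipe B picks the second address. */
]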
@@ -5358,7 +5421,7 @@ enum skl_disp_power_wells {
5358#define _PLANE_STRIDE_3(pipe) \ 5421#define _PLANE_STRIDE_3(pipe) \
5359 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) 5422 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
5360#define PLANE_STRIDE(pipe, plane) \ 5423#define PLANE_STRIDE(pipe, plane) \
5361 _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) 5424 _MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
5362 5425
5363#define _PLANE_POS_1_B 0x7118c 5426#define _PLANE_POS_1_B 0x7118c
5364#define _PLANE_POS_2_B 0x7128c 5427#define _PLANE_POS_2_B 0x7128c
@@ -5367,7 +5430,7 @@ enum skl_disp_power_wells {
 #define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
 #define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
 #define PLANE_POS(pipe, plane) \
-	_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
 
 #define _PLANE_SIZE_1_B 0x71190
 #define _PLANE_SIZE_2_B 0x71290
@@ -5376,7 +5439,7 @@ enum skl_disp_power_wells {
 #define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
 #define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
 #define PLANE_SIZE(pipe, plane) \
-	_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
 
 #define _PLANE_SURF_1_B 0x7119c
 #define _PLANE_SURF_2_B 0x7129c
@@ -5385,35 +5448,35 @@ enum skl_disp_power_wells {
 #define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
 #define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
 #define PLANE_SURF(pipe, plane) \
-	_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
 
 #define _PLANE_OFFSET_1_B 0x711a4
 #define _PLANE_OFFSET_2_B 0x712a4
 #define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
 #define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
 #define PLANE_OFFSET(pipe, plane) \
-	_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
 
 #define _PLANE_KEYVAL_1_B 0x71194
 #define _PLANE_KEYVAL_2_B 0x71294
 #define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
 #define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
 #define PLANE_KEYVAL(pipe, plane) \
-	_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
 
 #define _PLANE_KEYMSK_1_B 0x71198
 #define _PLANE_KEYMSK_2_B 0x71298
 #define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
 #define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
 #define PLANE_KEYMSK(pipe, plane) \
-	_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
 
 #define _PLANE_KEYMAX_1_B 0x711a0
 #define _PLANE_KEYMAX_2_B 0x712a0
 #define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
 #define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
 #define PLANE_KEYMAX(pipe, plane) \
-	_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
 
 #define _PLANE_BUF_CFG_1_B 0x7127c
 #define _PLANE_BUF_CFG_2_B 0x7137c
@@ -5422,7 +5485,7 @@ enum skl_disp_power_wells {
 #define _PLANE_BUF_CFG_2(pipe) \
 	_PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
 #define PLANE_BUF_CFG(pipe, plane) \
-	_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
 
 #define _PLANE_NV12_BUF_CFG_1_B 0x71278
 #define _PLANE_NV12_BUF_CFG_2_B 0x71378
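The PLANE_*() macros above all follow one shape: the underscore-prefixed _PLANE_*_1/_PLANE_*_2 helpers compute the raw offsets of a plane's two hardware instances, and _MMIO_PLANE() picks one and wraps it. A hypothetical use, with PIPE_B and I915_READ() standing in for the driver's existing enum and accessor:

/* Resolve plane 1 of pipe B once, then use the typed handle; with the
 * offsets above this should come out to _MMIO(0x7137c). */
i915_reg_t reg = PLANE_BUF_CFG(PIPE_B, 1);
u32 val = I915_READ(reg);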
@@ -5431,26 +5494,26 @@ enum skl_disp_power_wells {
 #define _PLANE_NV12_BUF_CFG_2(pipe) \
 	_PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
 #define PLANE_NV12_BUF_CFG(pipe, plane) \
-	_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
+	_MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
 
 /* SKL new cursor registers */
 #define _CUR_BUF_CFG_A 0x7017c
 #define _CUR_BUF_CFG_B 0x7117c
-#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
 
 /* VBIOS regs */
-#define VGACNTRL 0x71400
+#define VGACNTRL _MMIO(0x71400)
 # define VGA_DISP_DISABLE (1 << 31)
 # define VGA_2X_MODE (1 << 30)
 # define VGA_PIPE_B_SELECT (1 << 29)
 
-#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
+#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
 
 /* Ironlake */
 
-#define CPU_VGACNTRL 0x41000
+#define CPU_VGACNTRL _MMIO(0x41000)
 
-#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
+#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
 #define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
 #define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
 #define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
@@ -5463,26 +5526,26 @@ enum skl_disp_power_wells {
 #define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
 
 /* refresh rate hardware control */
-#define RR_HW_CTL 0x45300
+#define RR_HW_CTL _MMIO(0x45300)
 #define RR_HW_LOW_POWER_FRAMES_MASK 0xff
 #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
 
-#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_BIOS_0 _MMIO(0x46000)
 #define FDI_PLL_FB_CLOCK_MASK 0xff
-#define FDI_PLL_BIOS_1 0x46004
-#define FDI_PLL_BIOS_2 0x46008
-#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
-#define DISPLAY_PORT_PLL_BIOS_1 0x46010
-#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+#define FDI_PLL_BIOS_1 _MMIO(0x46004)
+#define FDI_PLL_BIOS_2 _MMIO(0x46008)
+#define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c)
+#define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010)
+#define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014)
 
-#define PCH_3DCGDIS0 0x46020
+#define PCH_3DCGDIS0 _MMIO(0x46020)
 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
 # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
 
-#define PCH_3DCGDIS1 0x46024
+#define PCH_3DCGDIS1 _MMIO(0x46024)
 # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
 
-#define FDI_PLL_FREQ_CTL 0x46030
+#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
 #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
 #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
 #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
@@ -5519,14 +5582,14 @@ enum skl_disp_power_wells {
 #define _PIPEB_LINK_M2 0x61048
 #define _PIPEB_LINK_N2 0x6104c
 
-#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
-#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
-#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
-#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
-#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
-#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
-#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
-#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
+#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2)
 
 /* CPU panel fitter */
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -5549,11 +5612,11 @@ enum skl_disp_power_wells {
 #define _PFA_HSCALE 0x68090
 #define _PFB_HSCALE 0x68890
 
-#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
-#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
-#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
-#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
-#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
 
 #define _PSA_CTL 0x68180
 #define _PSB_CTL 0x68980
@@ -5563,9 +5626,9 @@ enum skl_disp_power_wells {
 #define _PSA_WIN_POS 0x68170
 #define _PSB_WIN_POS 0x68970
 
-#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
-#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
-#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+#define PS_CTL(pipe) _MMIO_PIPE(pipe, _PSA_CTL, _PSB_CTL)
+#define PS_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
+#define PS_WIN_POS(pipe) _MMIO_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
 
 /*
  * Skylake scalers
@@ -5654,48 +5717,63 @@ enum skl_disp_power_wells {
 #define _PS_ECC_STAT_1C 0x691D0
 
 #define _ID(id, a, b) ((a) + (id)*((b)-(a)))
-#define SKL_PS_CTRL(pipe, id) _PIPE(pipe, \
+#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
 	_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
-#define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe, \
+#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
 	_ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
-#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe, \
+#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
 	_ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
-#define SKL_PS_WIN_SZ(pipe, id) _PIPE(pipe, \
+#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
 	_ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
-#define SKL_PS_VSCALE(pipe, id) _PIPE(pipe, \
+#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
 	_ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
-#define SKL_PS_HSCALE(pipe, id) _PIPE(pipe, \
+#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
 	_ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
-#define SKL_PS_VPHASE(pipe, id) _PIPE(pipe, \
+#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
 	_ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
-#define SKL_PS_HPHASE(pipe, id) _PIPE(pipe, \
+#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
 	_ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
-#define SKL_PS_ECC_STAT(pipe, id) _PIPE(pipe, \
+#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
 	_ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
-	_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)
+	_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
 
 /* legacy palette */
 #define _LGC_PALETTE_A 0x4a000
 #define _LGC_PALETTE_B 0x4a800
-#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
+#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
 
 #define _GAMMA_MODE_A 0x4a480
 #define _GAMMA_MODE_B 0x4ac80
-#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
+#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
 #define GAMMA_MODE_MODE_MASK (3 << 0)
 #define GAMMA_MODE_MODE_8BIT (0 << 0)
 #define GAMMA_MODE_MODE_10BIT (1 << 0)
 #define GAMMA_MODE_MODE_12BIT (2 << 0)
 #define GAMMA_MODE_MODE_SPLIT (3 << 0)
 
+/* DMC/CSR */
+#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
+#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define CSR_HTP_ADDR_SKL 0x00500034
+#define CSR_SSP_BASE _MMIO(0x8F074)
+#define CSR_HTP_SKL _MMIO(0x8F004)
+#define CSR_LAST_WRITE _MMIO(0x8F034)
+#define CSR_LAST_WRITE_VALUE 0xc003b400
+/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
+#define CSR_MMIO_START_RANGE 0x80000
+#define CSR_MMIO_END_RANGE 0x8FFFF
+#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
+#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
+#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL (1 << 31)
 #define DE_SPRITEB_FLIP_DONE (1 << 29)
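The DMC/CSR block just above is one of the few genuinely new additions in this section rather than a conversion: CSR_PROGRAM(i) defines a whole indexed window of registers (0x80000-0x82FFF) into which the DMC firmware payload is written one dword at a time. A sketch of the intended use, loosely modeled on the driver's CSR loader; the function and parameter names here are hypothetical:

/* Write the firmware payload into the CSR program window, one dword
 * per CSR_PROGRAM(i) register. */
static void csr_load_program(struct drm_i915_private *dev_priv,
			     const u32 *payload, u32 fw_size)
{
	u32 i;

	for (i = 0; i < fw_size; i++)
		I915_WRITE(CSR_PROGRAM(i), payload[i]);
}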
@@ -5747,20 +5825,20 @@ enum skl_disp_power_wells {
 #define DE_PIPEA_VBLANK_IVB (1<<0)
 #define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
 
-#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
 #define MASTER_INTERRUPT_ENABLE (1<<31)
 
-#define DEISR 0x44000
-#define DEIMR 0x44004
-#define DEIIR 0x44008
-#define DEIER 0x4400c
+#define DEISR _MMIO(0x44000)
+#define DEIMR _MMIO(0x44004)
+#define DEIIR _MMIO(0x44008)
+#define DEIER _MMIO(0x4400c)
 
-#define GTISR 0x44010
-#define GTIMR 0x44014
-#define GTIIR 0x44018
-#define GTIER 0x4401c
+#define GTISR _MMIO(0x44010)
+#define GTIMR _MMIO(0x44014)
+#define GTIIR _MMIO(0x44018)
+#define GTIER _MMIO(0x4401c)
 
-#define GEN8_MASTER_IRQ 0x44200
+#define GEN8_MASTER_IRQ _MMIO(0x44200)
 #define GEN8_MASTER_IRQ_CONTROL (1<<31)
 #define GEN8_PCU_IRQ (1<<30)
 #define GEN8_DE_PCH_IRQ (1<<23)
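What the conversion buys shows up well with interrupt registers like DEIIR: register *values* stay plain u32, but register *names* become i915_reg_t, so mixing the two stops compiling. Illustrative only; the comparison helper named in the last comment is an assumption:

u32 iir = I915_READ(DEIIR);	/* ok: typed register in, u32 value out */
I915_WRITE(DEIIR, iir);		/* ok: the value is still a plain u32 */
/* I915_READ(0x44008);		   would no longer compile */
/* DEIIR == GTIIR;		   nor would direct comparison; an
 *				   i915_mmio_reg_equal()-style helper
 *				   would be needed instead */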
@@ -5777,10 +5855,10 @@ enum skl_disp_power_wells {
 #define GEN8_GT_BCS_IRQ (1<<1)
 #define GEN8_GT_RCS_IRQ (1<<0)
 
-#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
-#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
-#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
-#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
+#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
 
 #define GEN8_RCS_IRQ_SHIFT 0
 #define GEN8_BCS_IRQ_SHIFT 16
@@ -5789,10 +5867,10 @@ enum skl_disp_power_wells {
 #define GEN8_VECS_IRQ_SHIFT 0
 #define GEN8_WD_IRQ_SHIFT 16
 
-#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
 #define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
 #define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
 #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
@@ -5825,10 +5903,10 @@ enum skl_disp_power_wells {
 	GEN9_PIPE_PLANE2_FAULT | \
 	GEN9_PIPE_PLANE1_FAULT)
 
-#define GEN8_DE_PORT_ISR 0x44440
-#define GEN8_DE_PORT_IMR 0x44444
-#define GEN8_DE_PORT_IIR 0x44448
-#define GEN8_DE_PORT_IER 0x4444c
+#define GEN8_DE_PORT_ISR _MMIO(0x44440)
+#define GEN8_DE_PORT_IMR _MMIO(0x44444)
+#define GEN8_DE_PORT_IIR _MMIO(0x44448)
+#define GEN8_DE_PORT_IER _MMIO(0x4444c)
 #define GEN9_AUX_CHANNEL_D (1 << 27)
 #define GEN9_AUX_CHANNEL_C (1 << 26)
 #define GEN9_AUX_CHANNEL_B (1 << 25)
@@ -5842,23 +5920,23 @@ enum skl_disp_power_wells {
 #define BXT_DE_PORT_GMBUS (1 << 1)
 #define GEN8_AUX_CHANNEL_A (1 << 0)
 
-#define GEN8_DE_MISC_ISR 0x44460
-#define GEN8_DE_MISC_IMR 0x44464
-#define GEN8_DE_MISC_IIR 0x44468
-#define GEN8_DE_MISC_IER 0x4446c
+#define GEN8_DE_MISC_ISR _MMIO(0x44460)
+#define GEN8_DE_MISC_IMR _MMIO(0x44464)
+#define GEN8_DE_MISC_IIR _MMIO(0x44468)
+#define GEN8_DE_MISC_IER _MMIO(0x4446c)
 #define GEN8_DE_MISC_GSE (1 << 27)
 
-#define GEN8_PCU_ISR 0x444e0
-#define GEN8_PCU_IMR 0x444e4
-#define GEN8_PCU_IIR 0x444e8
-#define GEN8_PCU_IER 0x444ec
+#define GEN8_PCU_ISR _MMIO(0x444e0)
+#define GEN8_PCU_IMR _MMIO(0x444e4)
+#define GEN8_PCU_IIR _MMIO(0x444e8)
+#define GEN8_PCU_IER _MMIO(0x444ec)
 
-#define ILK_DISPLAY_CHICKEN2 0x42004
+#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define ILK_ELPIN_409_SELECT (1 << 25)
 #define ILK_DPARB_GATE (1<<22)
 #define ILK_VSDPFD_FULL (1<<21)
-#define FUSE_STRAP 0x42014
+#define FUSE_STRAP _MMIO(0x42014)
 #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
 #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
 #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
@@ -5867,18 +5945,18 @@ enum skl_disp_power_wells {
 #define HSW_CDCLK_LIMIT (1 << 24)
 #define ILK_DESKTOP (1 << 23)
 
-#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
 #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
 #define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
 #define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
 #define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
 #define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
 
-#define IVB_CHICKEN3 0x4200c
+#define IVB_CHICKEN3 _MMIO(0x4200c)
 # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
 # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
 
-#define CHICKEN_PAR1_1 0x42080
+#define CHICKEN_PAR1_1 _MMIO(0x42080)
 #define DPA_MASK_VBLANK_SRD (1 << 15)
 #define FORCE_ARB_IDLE_PLANES (1 << 14)
 
@@ -5886,70 +5964,70 @@ enum skl_disp_power_wells {
 #define _CHICKEN_PIPESL_1_B 0x420b4
 #define HSW_FBCQ_DIS (1 << 22)
 #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
-#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
-#define DISP_ARB_CTL 0x45000
+#define DISP_ARB_CTL _MMIO(0x45000)
 #define DISP_TILE_SURFACE_SWIZZLING (1<<13)
 #define DISP_FBC_WM_DIS (1<<15)
-#define DISP_ARB_CTL2 0x45004
+#define DISP_ARB_CTL2 _MMIO(0x45004)
 #define DISP_DATA_PARTITION_5_6 (1<<6)
-#define DBUF_CTL 0x45008
+#define DBUF_CTL _MMIO(0x45008)
 #define DBUF_POWER_REQUEST (1<<31)
 #define DBUF_POWER_STATE (1<<30)
-#define GEN7_MSG_CTL 0x45010
+#define GEN7_MSG_CTL _MMIO(0x45010)
 #define WAIT_FOR_PCH_RESET_ACK (1<<1)
 #define WAIT_FOR_PCH_FLR_ACK (1<<0)
-#define HSW_NDE_RSTWRN_OPT 0x46408
+#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
 #define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
 
-#define SKL_DFSM 0x51000
+#define SKL_DFSM _MMIO(0x51000)
 #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
 
-#define FF_SLICE_CS_CHICKEN2 0x20e4
+#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
 #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
 
 /* GEN7 chicken */
-#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
+#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
 # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
-#define COMMON_SLICE_CHICKEN2 0x7014
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
 
-#define HIZ_CHICKEN 0x7018
+#define HIZ_CHICKEN _MMIO(0x7018)
 # define CHV_HZ_8X8_MODE_IN_1X (1<<15)
 # define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
 
-#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
 #define DISABLE_PIXEL_MASK_CAMMING (1<<14)
 
-#define GEN7_L3SQCREG1 0xB010
+#define GEN7_L3SQCREG1 _MMIO(0xB010)
 #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
 
-#define GEN8_L3SQCREG1 0xB100
+#define GEN8_L3SQCREG1 _MMIO(0xB100)
 #define BDW_WA_L3SQCREG1_DEFAULT 0x784000
 
-#define GEN7_L3CNTLREG1 0xB01C
+#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
 #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
 #define GEN7_L3AGDIS (1<<19)
-#define GEN7_L3CNTLREG2 0xB020
-#define GEN7_L3CNTLREG3 0xB024
+#define GEN7_L3CNTLREG2 _MMIO(0xB020)
+#define GEN7_L3CNTLREG3 _MMIO(0xB024)
 
-#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
+#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030)
 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000
 
-#define GEN7_L3SQCREG4 0xb034
+#define GEN7_L3SQCREG4 _MMIO(0xb034)
 #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
 
-#define GEN8_L3SQCREG4 0xb118
+#define GEN8_L3SQCREG4 _MMIO(0xb118)
 #define GEN8_LQSC_RO_PERF_DIS (1<<27)
 #define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
 
 /* GEN8 chicken */
-#define HDC_CHICKEN0 0x7300
+#define HDC_CHICKEN0 _MMIO(0x7300)
 #define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
 #define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
 #define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
@@ -5958,17 +6036,17 @@ enum skl_disp_power_wells {
 #define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
 
 /* GEN9 chicken */
-#define SLICE_ECO_CHICKEN0 0x7308
+#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
 #define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
 
 /* WaCatErrorRejectionIssue */
-#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
 
-#define HSW_SCRATCH1 0xb038
+#define HSW_SCRATCH1 _MMIO(0xb038)
 #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
 
-#define BDW_SCRATCH1 0xb11c
+#define BDW_SCRATCH1 _MMIO(0xb11c)
 #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
 
 /* PCH */
@@ -6062,12 +6140,12 @@ enum skl_disp_power_wells {
 	SDE_FDI_RXB_CPT | \
 	SDE_FDI_RXA_CPT)
 
-#define SDEISR 0xc4000
-#define SDEIMR 0xc4004
-#define SDEIIR 0xc4008
-#define SDEIER 0xc400c
+#define SDEISR _MMIO(0xc4000)
+#define SDEIMR _MMIO(0xc4004)
+#define SDEIIR _MMIO(0xc4008)
+#define SDEIER _MMIO(0xc400c)
 
-#define SERR_INT 0xc4040
+#define SERR_INT _MMIO(0xc4040)
 #define SERR_INT_POISON (1<<31)
 #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
 #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
@@ -6075,7 +6153,7 @@ enum skl_disp_power_wells {
 #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
 
 /* digital port hotplug */
-#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
+#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
 #define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
 #define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
 #define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
@@ -6112,42 +6190,42 @@ enum skl_disp_power_wells {
 #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
 #define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
 
-#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */
+#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
 #define PORTE_HOTPLUG_ENABLE (1 << 4)
 #define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
 #define PORTE_HOTPLUG_NO_DETECT (0 << 0)
 #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
 #define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
 
-#define PCH_GPIOA 0xc5010
-#define PCH_GPIOB 0xc5014
-#define PCH_GPIOC 0xc5018
-#define PCH_GPIOD 0xc501c
-#define PCH_GPIOE 0xc5020
-#define PCH_GPIOF 0xc5024
+#define PCH_GPIOA _MMIO(0xc5010)
+#define PCH_GPIOB _MMIO(0xc5014)
+#define PCH_GPIOC _MMIO(0xc5018)
+#define PCH_GPIOD _MMIO(0xc501c)
+#define PCH_GPIOE _MMIO(0xc5020)
+#define PCH_GPIOF _MMIO(0xc5024)
 
-#define PCH_GMBUS0 0xc5100
-#define PCH_GMBUS1 0xc5104
-#define PCH_GMBUS2 0xc5108
-#define PCH_GMBUS3 0xc510c
-#define PCH_GMBUS4 0xc5110
-#define PCH_GMBUS5 0xc5120
+#define PCH_GMBUS0 _MMIO(0xc5100)
+#define PCH_GMBUS1 _MMIO(0xc5104)
+#define PCH_GMBUS2 _MMIO(0xc5108)
+#define PCH_GMBUS3 _MMIO(0xc510c)
+#define PCH_GMBUS4 _MMIO(0xc5110)
+#define PCH_GMBUS5 _MMIO(0xc5120)
 
 #define _PCH_DPLL_A 0xc6014
 #define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
 #define _PCH_FPA0 0xc6040
 #define FP_CB_TUNE (0x3<<22)
 #define _PCH_FPA1 0xc6044
 #define _PCH_FPB0 0xc6048
 #define _PCH_FPB1 0xc604c
-#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
-#define PCH_DPLL_TEST 0xc606c
+#define PCH_DPLL_TEST _MMIO(0xc606c)
 
-#define PCH_DREF_CONTROL 0xC6200
+#define PCH_DREF_CONTROL _MMIO(0xC6200)
 #define DREF_CONTROL_MASK 0x7fc3
 #define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
 #define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
@@ -6170,19 +6248,19 @@ enum skl_disp_power_wells {
 #define DREF_SSC4_DISABLE (0)
 #define DREF_SSC4_ENABLE (1)
 
-#define PCH_RAWCLK_FREQ 0xc6204
+#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
 #define FDL_TP1_TIMER_SHIFT 12
 #define FDL_TP1_TIMER_MASK (3<<12)
 #define FDL_TP2_TIMER_SHIFT 10
 #define FDL_TP2_TIMER_MASK (3<<10)
 #define RAWCLK_FREQ_MASK 0x3ff
 
-#define PCH_DPLL_TMR_CFG 0xc6208
+#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
 
-#define PCH_SSC4_PARMS 0xc6210
-#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_SSC4_PARMS _MMIO(0xc6210)
+#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
 
-#define PCH_DPLL_SEL 0xc7000
+#define PCH_DPLL_SEL _MMIO(0xc7000)
 #define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
 #define TRANS_DPLLA_SEL(pipe) 0
 #define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
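PCH_DPLL(), PCH_FP0() and PCH_FP1() show that _MMIO() composes with arbitrary constant-expression offset math, not just the _PIPE() helpers: the ternary is evaluated first and the result wrapped once. Hypothetical usage:

i915_reg_t dpll = PCH_DPLL(1);		/* _MMIO(0xc6018) */
u32 fp0 = I915_READ(PCH_FP0(1));	/* reads raw offset 0xc6048 */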
@@ -6230,79 +6308,73 @@ enum skl_disp_power_wells {
 #define _VIDEO_DIP_DATA_B 0xe1208
 #define _VIDEO_DIP_GCP_B 0xe1210
 
-#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
-#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
-#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
 /* Per-transcoder DIP controls (VLV) */
-#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
-#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
+#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
+#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
 
-#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
-#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
+#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
 
-#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
-#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
-#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
+#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
+#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
 
 #define VLV_TVIDEO_DIP_CTL(pipe) \
-	_PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
-	       VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
+	_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \
+	       _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C)
 #define VLV_TVIDEO_DIP_DATA(pipe) \
-	_PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
-	       VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
+	_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \
+	       _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C)
 #define VLV_TVIDEO_DIP_GCP(pipe) \
-	_PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
-	       VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+	_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+	       _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
 
 /* Haswell DIP controls */
-#define HSW_VIDEO_DIP_CTL_A 0x60200
-#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
-#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
-#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
-#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
-#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
-#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
-#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
-#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
-#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
-#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
-#define HSW_VIDEO_DIP_GCP_A 0x60210
-
-#define HSW_VIDEO_DIP_CTL_B 0x61200
-#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
-#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
-#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
-#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
-#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
-#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
-#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
-#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
-#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
-#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
-#define HSW_VIDEO_DIP_GCP_B 0x61210
-
-#define HSW_TVIDEO_DIP_CTL(trans) \
-	_TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
-#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \
-	(_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4)
-#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \
-	(_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4)
-#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \
-	(_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4)
-#define HSW_TVIDEO_DIP_GCP(trans) \
-	_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
-#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \
-	(_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4)
-
-#define HSW_STEREO_3D_CTL_A 0x70020
-#define S3D_ENABLE (1<<31)
-#define HSW_STEREO_3D_CTL_B 0x71020
-
-#define HSW_STEREO_3D_CTL(trans) \
-	_PIPE2(trans, HSW_STEREO_3D_CTL_A)
+
+#define _HSW_VIDEO_DIP_CTL_A 0x60200
+#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define _HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define _HSW_VIDEO_DIP_CTL_B 0x61200
+#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define _HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+
+#define _HSW_STEREO_3D_CTL_A 0x70020
+#define S3D_ENABLE (1<<31)
+#define _HSW_STEREO_3D_CTL_B 0x71020
+
+#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
 
 #define _PCH_TRANS_HTOTAL_B 0xe1000
 #define _PCH_TRANS_HBLANK_B 0xe1004
@@ -6310,16 +6382,15 @@ enum skl_disp_power_wells {
 #define _PCH_TRANS_VTOTAL_B 0xe100c
 #define _PCH_TRANS_VBLANK_B 0xe1010
 #define _PCH_TRANS_VSYNC_B 0xe1014
 #define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
 
-#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
-#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
-#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
-#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
-#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
-#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
-#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
-	_PCH_TRANS_VSYNCSHIFT_B)
+#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
 
 #define _PCH_TRANSB_DATA_M1 0xe1030
 #define _PCH_TRANSB_DATA_N1 0xe1034
@@ -6330,19 +6401,19 @@ enum skl_disp_power_wells {
 #define _PCH_TRANSB_LINK_M2 0xe1048
 #define _PCH_TRANSB_LINK_N2 0xe104c
 
-#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
-#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
-#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
-#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
-#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
-#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
-#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
-#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
 
 #define _PCH_TRANSACONF 0xf0008
 #define _PCH_TRANSBCONF 0xf1008
-#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
-#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
+#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
 #define TRANS_DISABLE (0<<31)
 #define TRANS_ENABLE (1<<31)
 #define TRANS_STATE_MASK (1<<30)
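LPT has a single PCH transcoder, so LPT_TRANSCONF used to alias the raw transcoder-A offset directly; re-expressing it as PCH_TRANSCONF(PIPE_A) keeps the alias typed with no extra definitions. Both hypothetical accesses below hit the same 0xf0008 register:

u32 a = I915_READ(PCH_TRANSCONF(PIPE_A));
u32 b = I915_READ(LPT_TRANSCONF);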
@@ -6363,47 +6434,47 @@ enum skl_disp_power_wells {
 
 #define _TRANSA_CHICKEN1 0xf0060
 #define _TRANSB_CHICKEN1 0xf1060
-#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
 #define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
 #define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
 #define _TRANSA_CHICKEN2 0xf0064
 #define _TRANSB_CHICKEN2 0xf1064
-#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
 #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
 #define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
 #define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
 #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
 #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
 
-#define SOUTH_CHICKEN1 0xc2000
+#define SOUTH_CHICKEN1 _MMIO(0xc2000)
 #define FDIA_PHASE_SYNC_SHIFT_OVR 19
 #define FDIA_PHASE_SYNC_SHIFT_EN 18
 #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
 #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
 #define FDI_BC_BIFURCATION_SELECT (1 << 12)
 #define SPT_PWM_GRANULARITY (1<<0)
-#define SOUTH_CHICKEN2 0xc2004
+#define SOUTH_CHICKEN2 _MMIO(0xc2004)
 #define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
 #define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
 #define LPT_PWM_GRANULARITY (1<<5)
 #define DPLS_EDP_PPS_FIX_DIS (1<<0)
 
 #define _FDI_RXA_CHICKEN 0xc200c
 #define _FDI_RXB_CHICKEN 0xc2010
 #define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
 #define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
-#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
-#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
 #define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
 #define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
 #define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
 
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL 0x60100
 #define _FDI_TXB_CTL 0x61100
-#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
+#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
 #define FDI_TX_DISABLE (0<<31)
 #define FDI_TX_ENABLE (1<<31)
 #define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -6453,7 +6524,7 @@ enum skl_disp_power_wells {
 /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
 #define _FDI_RXA_CTL 0xf000c
 #define _FDI_RXB_CTL 0xf100c
-#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
+#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
 #define FDI_RX_ENABLE (1<<31)
 /* train, dp width same as FDI_TX */
 #define FDI_FS_ERRC_ENABLE (1<<27)
@@ -6489,14 +6560,14 @@ enum skl_disp_power_wells {
 #define FDI_RX_TP1_TO_TP2_48 (2<<20)
 #define FDI_RX_TP1_TO_TP2_64 (3<<20)
 #define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 
 #define _FDI_RXA_TUSIZE1 0xf0030
 #define _FDI_RXA_TUSIZE2 0xf0038
 #define _FDI_RXB_TUSIZE1 0xf1030
 #define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
-#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
+#define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
 /* FDI_RX interrupt register format */
 #define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -6511,44 +6582,41 @@ enum skl_disp_power_wells {
 #define FDI_RX_CROSS_CLOCK_OVERFLOW	(1<<1)
 #define FDI_RX_SYMBOL_QUEUE_OVERFLOW	(1<<0)
 
 #define _FDI_RXA_IIR		0xf0014
 #define _FDI_RXA_IMR		0xf0018
 #define _FDI_RXB_IIR		0xf1014
 #define _FDI_RXB_IMR		0xf1018
-#define FDI_RX_IIR(pipe)	_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
-#define FDI_RX_IMR(pipe)	_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
+#define FDI_RX_IIR(pipe)	_MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe)	_MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
 
-#define FDI_PLL_CTL_1		0xfe000
-#define FDI_PLL_CTL_2		0xfe004
+#define FDI_PLL_CTL_1		_MMIO(0xfe000)
+#define FDI_PLL_CTL_2		_MMIO(0xfe004)
 
-#define PCH_LVDS	0xe1180
+#define PCH_LVDS	_MMIO(0xe1180)
 #define  LVDS_DETECTED	(1 << 1)
 
 /* vlv has 2 sets of panel control regs. */
-#define PIPEA_PP_STATUS		(VLV_DISPLAY_BASE + 0x61200)
-#define PIPEA_PP_CONTROL	(VLV_DISPLAY_BASE + 0x61204)
-#define PIPEA_PP_ON_DELAYS	(VLV_DISPLAY_BASE + 0x61208)
+#define _PIPEA_PP_STATUS	(VLV_DISPLAY_BASE + 0x61200)
+#define _PIPEA_PP_CONTROL	(VLV_DISPLAY_BASE + 0x61204)
+#define _PIPEA_PP_ON_DELAYS	(VLV_DISPLAY_BASE + 0x61208)
 #define  PANEL_PORT_SELECT_VLV(port)	((port) << 30)
-#define PIPEA_PP_OFF_DELAYS	(VLV_DISPLAY_BASE + 0x6120c)
-#define PIPEA_PP_DIVISOR	(VLV_DISPLAY_BASE + 0x61210)
+#define _PIPEA_PP_OFF_DELAYS	(VLV_DISPLAY_BASE + 0x6120c)
+#define _PIPEA_PP_DIVISOR	(VLV_DISPLAY_BASE + 0x61210)
 
-#define PIPEB_PP_STATUS		(VLV_DISPLAY_BASE + 0x61300)
-#define PIPEB_PP_CONTROL	(VLV_DISPLAY_BASE + 0x61304)
-#define PIPEB_PP_ON_DELAYS	(VLV_DISPLAY_BASE + 0x61308)
-#define PIPEB_PP_OFF_DELAYS	(VLV_DISPLAY_BASE + 0x6130c)
-#define PIPEB_PP_DIVISOR	(VLV_DISPLAY_BASE + 0x61310)
+#define _PIPEB_PP_STATUS	(VLV_DISPLAY_BASE + 0x61300)
+#define _PIPEB_PP_CONTROL	(VLV_DISPLAY_BASE + 0x61304)
+#define _PIPEB_PP_ON_DELAYS	(VLV_DISPLAY_BASE + 0x61308)
+#define _PIPEB_PP_OFF_DELAYS	(VLV_DISPLAY_BASE + 0x6130c)
+#define _PIPEB_PP_DIVISOR	(VLV_DISPLAY_BASE + 0x61310)
 
-#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
-#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
-#define VLV_PIPE_PP_ON_DELAYS(pipe) \
-		_PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
-#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
-		_PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
-#define VLV_PIPE_PP_DIVISOR(pipe) \
-		_PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
+#define VLV_PIPE_PP_STATUS(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR)
 
-#define PCH_PP_STATUS		0xc7200
-#define PCH_PP_CONTROL		0xc7204
+#define _PCH_PP_STATUS		0xc7200
+#define _PCH_PP_CONTROL		0xc7204
 #define  PANEL_UNLOCK_REGS	(0xabcd << 16)
 #define  PANEL_UNLOCK_MASK	(0xffff << 16)
 #define  BXT_POWER_CYCLE_DELAY_MASK	(0x1f0)
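Editor's note: the `_MMIO()` wrapper this series introduces turns raw register offsets into a distinct type, so an offset can no longer be silently confused with a register value. A minimal sketch of the idea, assuming the `i915_reg_t` typedef and `_PIPE()` helper as they appear in the i915 driver of this era (exact definitions may differ slightly by kernel version):

/* Sketch: type-safe register handles behind _MMIO()/_MMIO_PIPE(). */
typedef struct {
	unsigned int reg;	/* raw MMIO offset */
} i915_reg_t;

#define _MMIO(r)	((const i915_reg_t){ .reg = (r) })

/* Two-register parameterization: the pipe B instance sits at a fixed
 * stride from its pipe A twin, so index math recovers the offset. */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _MMIO_PIPE(pipe, a, b)	_MMIO(_PIPE(pipe, a, b))

/* e.g. FDI_RX_IIR(1) -> 0xf0014 + 1 * (0xf1014 - 0xf0014) = 0xf1014 */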
@@ -6558,7 +6626,7 @@ enum skl_disp_power_wells {
 #define  PANEL_POWER_RESET	(1 << 1)
 #define  PANEL_POWER_OFF	(0 << 0)
 #define  PANEL_POWER_ON		(1 << 0)
-#define PCH_PP_ON_DELAYS	0xc7208
+#define _PCH_PP_ON_DELAYS	0xc7208
 #define  PANEL_PORT_SELECT_MASK	(3 << 30)
 #define  PANEL_PORT_SELECT_LVDS	(0 << 30)
 #define  PANEL_PORT_SELECT_DPA	(1 << 30)
@@ -6569,52 +6637,64 @@ enum skl_disp_power_wells {
 #define  PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
 #define  PANEL_LIGHT_ON_DELAY_SHIFT	0
 
-#define PCH_PP_OFF_DELAYS	0xc720c
+#define _PCH_PP_OFF_DELAYS	0xc720c
 #define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT	16
 #define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
 #define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
 
-#define PCH_PP_DIVISOR	0xc7210
+#define _PCH_PP_DIVISOR	0xc7210
 #define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
 #define  PP_REFERENCE_DIVIDER_SHIFT	8
 #define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
+#define PCH_PP_STATUS		_MMIO(_PCH_PP_STATUS)
+#define PCH_PP_CONTROL		_MMIO(_PCH_PP_CONTROL)
+#define PCH_PP_ON_DELAYS	_MMIO(_PCH_PP_ON_DELAYS)
+#define PCH_PP_OFF_DELAYS	_MMIO(_PCH_PP_OFF_DELAYS)
+#define PCH_PP_DIVISOR		_MMIO(_PCH_PP_DIVISOR)
+
 /* BXT PPS changes - 2nd set of PPS registers */
 #define _BXT_PP_STATUS2		0xc7300
 #define _BXT_PP_CONTROL2	0xc7304
 #define _BXT_PP_ON_DELAYS2	0xc7308
 #define _BXT_PP_OFF_DELAYS2	0xc730c
 
-#define BXT_PP_STATUS(n)	_PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2)
-#define BXT_PP_CONTROL(n)	_PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2)
-#define BXT_PP_ON_DELAYS(n)	_PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
-#define BXT_PP_OFF_DELAYS(n)	_PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
+#define BXT_PP_STATUS(n)	_MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n)	_MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n)	_MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n)	_MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
 
-#define PCH_DP_B		0xe4100
-#define PCH_DPB_AUX_CH_CTL	0xe4110
-#define PCH_DPB_AUX_CH_DATA1	0xe4114
-#define PCH_DPB_AUX_CH_DATA2	0xe4118
-#define PCH_DPB_AUX_CH_DATA3	0xe411c
-#define PCH_DPB_AUX_CH_DATA4	0xe4120
-#define PCH_DPB_AUX_CH_DATA5	0xe4124
+#define _PCH_DP_B		0xe4100
+#define PCH_DP_B		_MMIO(_PCH_DP_B)
+#define _PCH_DPB_AUX_CH_CTL	0xe4110
+#define _PCH_DPB_AUX_CH_DATA1	0xe4114
+#define _PCH_DPB_AUX_CH_DATA2	0xe4118
+#define _PCH_DPB_AUX_CH_DATA3	0xe411c
+#define _PCH_DPB_AUX_CH_DATA4	0xe4120
+#define _PCH_DPB_AUX_CH_DATA5	0xe4124
 
-#define PCH_DP_C		0xe4200
-#define PCH_DPC_AUX_CH_CTL	0xe4210
-#define PCH_DPC_AUX_CH_DATA1	0xe4214
-#define PCH_DPC_AUX_CH_DATA2	0xe4218
-#define PCH_DPC_AUX_CH_DATA3	0xe421c
-#define PCH_DPC_AUX_CH_DATA4	0xe4220
-#define PCH_DPC_AUX_CH_DATA5	0xe4224
+#define _PCH_DP_C		0xe4200
+#define PCH_DP_C		_MMIO(_PCH_DP_C)
+#define _PCH_DPC_AUX_CH_CTL	0xe4210
+#define _PCH_DPC_AUX_CH_DATA1	0xe4214
+#define _PCH_DPC_AUX_CH_DATA2	0xe4218
+#define _PCH_DPC_AUX_CH_DATA3	0xe421c
+#define _PCH_DPC_AUX_CH_DATA4	0xe4220
+#define _PCH_DPC_AUX_CH_DATA5	0xe4224
 
-#define PCH_DP_D		0xe4300
-#define PCH_DPD_AUX_CH_CTL	0xe4310
-#define PCH_DPD_AUX_CH_DATA1	0xe4314
-#define PCH_DPD_AUX_CH_DATA2	0xe4318
-#define PCH_DPD_AUX_CH_DATA3	0xe431c
-#define PCH_DPD_AUX_CH_DATA4	0xe4320
-#define PCH_DPD_AUX_CH_DATA5	0xe4324
+#define _PCH_DP_D		0xe4300
+#define PCH_DP_D		_MMIO(_PCH_DP_D)
+#define _PCH_DPD_AUX_CH_CTL	0xe4310
+#define _PCH_DPD_AUX_CH_DATA1	0xe4314
+#define _PCH_DPD_AUX_CH_DATA2	0xe4318
+#define _PCH_DPD_AUX_CH_DATA3	0xe431c
+#define _PCH_DPD_AUX_CH_DATA4	0xe4320
+#define _PCH_DPD_AUX_CH_DATA5	0xe4324
+
+#define PCH_DP_AUX_CH_CTL(port)		_MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
+#define PCH_DP_AUX_CH_DATA(port, i)	_MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
 /* CPT */
 #define  PORT_TRANS_A_SEL_CPT	0
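Editor's note: the new PCH_DP_AUX_CH_DATA(port, i) macro folds fifteen per-port data defines into one computed lookup; each AUX channel's five 32-bit data registers are consecutive, and the B/C/D channels sit 0x100 apart. A worked check of that arithmetic (the enum and helper are restated locally for illustration):

/* Sketch: verify the AUX data register arithmetic from this hunk. */
#include <assert.h>

#define _PORT(port, a, b)	((a) + (port) * ((b) - (a)))
enum port { PORT_A, PORT_B, PORT_C, PORT_D };

static unsigned int pch_dp_aux_ch_data(enum port port, int i)
{
	/* base 0xe4114 (port B), stride 0x100 per port, 4 bytes per reg */
	return _PORT(port - PORT_B, 0xe4114, 0xe4214) + i * 4;
}

int main(void)
{
	assert(pch_dp_aux_ch_data(PORT_B, 0) == 0xe4114);
	assert(pch_dp_aux_ch_data(PORT_C, 2) == 0xe421c);
	assert(pch_dp_aux_ch_data(PORT_D, 4) == 0xe4324);
	return 0;
}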
@@ -6627,10 +6707,10 @@ enum skl_disp_power_wells {
 #define  SDVO_PORT_TO_PIPE_CHV(val)	(((val) & (3<<24)) >> 24)
 #define  DP_PORT_TO_PIPE_CHV(val)	(((val) & (3<<16)) >> 16)
 
-#define TRANS_DP_CTL_A		0xe0300
-#define TRANS_DP_CTL_B		0xe1300
-#define TRANS_DP_CTL_C		0xe2300
-#define TRANS_DP_CTL(pipe)	_PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
+#define _TRANS_DP_CTL_A		0xe0300
+#define _TRANS_DP_CTL_B		0xe1300
+#define _TRANS_DP_CTL_C		0xe2300
+#define TRANS_DP_CTL(pipe)	_MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
 #define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
 #define  TRANS_DP_PORT_SEL_B	(0<<29)
 #define  TRANS_DP_PORT_SEL_C	(1<<29)
@@ -6683,40 +6763,40 @@ enum skl_disp_power_wells {
 
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
 
-#define VLV_PMWGICZ			0x1300a4
+#define VLV_PMWGICZ			_MMIO(0x1300a4)
 
-#define FORCEWAKE			0xA18C
-#define FORCEWAKE_VLV			0x1300b0
-#define FORCEWAKE_ACK_VLV		0x1300b4
-#define FORCEWAKE_MEDIA_VLV		0x1300b8
-#define FORCEWAKE_ACK_MEDIA_VLV		0x1300bc
-#define FORCEWAKE_ACK_HSW		0x130044
-#define FORCEWAKE_ACK			0x130090
-#define VLV_GTLC_WAKE_CTRL		0x130090
+#define FORCEWAKE			_MMIO(0xA18C)
+#define FORCEWAKE_VLV			_MMIO(0x1300b0)
+#define FORCEWAKE_ACK_VLV		_MMIO(0x1300b4)
+#define FORCEWAKE_MEDIA_VLV		_MMIO(0x1300b8)
+#define FORCEWAKE_ACK_MEDIA_VLV		_MMIO(0x1300bc)
+#define FORCEWAKE_ACK_HSW		_MMIO(0x130044)
+#define FORCEWAKE_ACK			_MMIO(0x130090)
+#define VLV_GTLC_WAKE_CTRL		_MMIO(0x130090)
 #define   VLV_GTLC_RENDER_CTX_EXISTS	(1 << 25)
 #define   VLV_GTLC_MEDIA_CTX_EXISTS	(1 << 24)
 #define   VLV_GTLC_ALLOWWAKEREQ		(1 << 0)
 
-#define VLV_GTLC_PW_STATUS		0x130094
+#define VLV_GTLC_PW_STATUS		_MMIO(0x130094)
 #define   VLV_GTLC_ALLOWWAKEACK		(1 << 0)
 #define   VLV_GTLC_ALLOWWAKEERR		(1 << 1)
 #define   VLV_GTLC_PW_MEDIA_STATUS_MASK	(1 << 5)
 #define   VLV_GTLC_PW_RENDER_STATUS_MASK	(1 << 7)
-#define  FORCEWAKE_MT			0xa188 /* multi-threaded */
-#define  FORCEWAKE_MEDIA_GEN9		0xa270
-#define  FORCEWAKE_RENDER_GEN9		0xa278
-#define  FORCEWAKE_BLITTER_GEN9		0xa188
-#define  FORCEWAKE_ACK_MEDIA_GEN9	0x0D88
-#define  FORCEWAKE_ACK_RENDER_GEN9	0x0D84
-#define  FORCEWAKE_ACK_BLITTER_GEN9	0x130044
+#define  FORCEWAKE_MT			_MMIO(0xa188) /* multi-threaded */
+#define  FORCEWAKE_MEDIA_GEN9		_MMIO(0xa270)
+#define  FORCEWAKE_RENDER_GEN9		_MMIO(0xa278)
+#define  FORCEWAKE_BLITTER_GEN9		_MMIO(0xa188)
+#define  FORCEWAKE_ACK_MEDIA_GEN9	_MMIO(0x0D88)
+#define  FORCEWAKE_ACK_RENDER_GEN9	_MMIO(0x0D84)
+#define  FORCEWAKE_ACK_BLITTER_GEN9	_MMIO(0x130044)
 #define   FORCEWAKE_KERNEL		0x1
 #define   FORCEWAKE_USER		0x2
-#define  FORCEWAKE_MT_ACK		0x130040
-#define  ECOBUS				0xa180
+#define  FORCEWAKE_MT_ACK		_MMIO(0x130040)
+#define  ECOBUS				_MMIO(0xa180)
 #define    FORCEWAKE_MT_ENABLE		(1<<5)
-#define  VLV_SPAREG2H			0xA194
+#define  VLV_SPAREG2H			_MMIO(0xA194)
 
-#define  GTFIFODBG			0x120000
+#define  GTFIFODBG			_MMIO(0x120000)
 #define    GT_FIFO_SBDROPERR		(1<<6)
 #define    GT_FIFO_BLOBDROPERR		(1<<5)
 #define    GT_FIFO_SB_READ_ABORTERR	(1<<4)
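Editor's note: FORCEWAKE_MT is the "multi-threaded" variant precisely because it is driven with masked writes: the high 16 bits of the written value select which low bits take effect, so independent agents can flip their own bits without a read-modify-write race. A sketch of that convention, assuming the driver's usual _MASKED_FIELD helpers:

/* Sketch: masked-write convention used for FORCEWAKE_MT-style registers. */
#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

#define FORCEWAKE_KERNEL	0x1

/* requesting / releasing the kernel's forcewake bit: */
static const unsigned int fw_on  = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);  /* 0x00010001 */
static const unsigned int fw_off = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); /* 0x00010000 */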
@@ -6725,23 +6805,23 @@ enum skl_disp_power_wells {
 #define    GT_FIFO_IAWRERR		(1<<1)
 #define    GT_FIFO_IARDERR		(1<<0)
 
-#define  GTFIFOCTL			0x120008
+#define  GTFIFOCTL			_MMIO(0x120008)
 #define    GT_FIFO_FREE_ENTRIES_MASK	0x7f
 #define    GT_FIFO_NUM_RESERVED_ENTRIES	20
 #define    GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL	(1 << 12)
 #define    GT_FIFO_CTL_RC6_POLICY_STALL	(1 << 11)
 
-#define  HSW_IDICR			0x9008
+#define  HSW_IDICR			_MMIO(0x9008)
 #define    IDIHASHMSK(x)		(((x) & 0x3f) << 16)
-#define  HSW_EDRAM_PRESENT		0x120010
+#define  HSW_EDRAM_PRESENT		_MMIO(0x120010)
 #define    EDRAM_ENABLED		0x1
 
-#define GEN6_UCGCTL1			0x9400
+#define GEN6_UCGCTL1			_MMIO(0x9400)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE	(1 << 16)
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE	(1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE		(1 << 7)
 
-#define GEN6_UCGCTL2			0x9404
+#define GEN6_UCGCTL2			_MMIO(0x9404)
 # define GEN6_VFUNIT_CLOCK_GATE_DISABLE		(1 << 31)
 # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE	(1 << 30)
 # define GEN7_TDLUNIT_CLOCK_GATE_DISABLE	(1 << 22)
@@ -6749,30 +6829,30 @@ enum skl_disp_power_wells {
 # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE	(1 << 12)
 # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE	(1 << 11)
 
-#define GEN6_UCGCTL3			0x9408
+#define GEN6_UCGCTL3			_MMIO(0x9408)
 
-#define GEN7_UCGCTL4			0x940c
+#define GEN7_UCGCTL4			_MMIO(0x940c)
 #define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE	(1<<25)
 
-#define GEN6_RCGCTL1			0x9410
-#define GEN6_RCGCTL2			0x9414
-#define GEN6_RSTCTL			0x9420
+#define GEN6_RCGCTL1			_MMIO(0x9410)
+#define GEN6_RCGCTL2			_MMIO(0x9414)
+#define GEN6_RSTCTL			_MMIO(0x9420)
 
-#define GEN8_UCGCTL6			0x9430
+#define GEN8_UCGCTL6			_MMIO(0x9430)
 #define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE	(1<<24)
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 #define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
 
-#define GEN6_GFXPAUSE			0xA000
-#define GEN6_RPNSWREQ			0xA008
+#define GEN6_GFXPAUSE			_MMIO(0xA000)
+#define GEN6_RPNSWREQ			_MMIO(0xA008)
 #define   GEN6_TURBO_DISABLE		(1<<31)
 #define   GEN6_FREQUENCY(x)		((x)<<25)
 #define   HSW_FREQUENCY(x)		((x)<<24)
 #define   GEN9_FREQUENCY(x)		((x)<<23)
 #define   GEN6_OFFSET(x)		((x)<<19)
 #define   GEN6_AGGRESSIVE_TURBO		(0<<15)
-#define GEN6_RC_VIDEO_FREQ		0xA00C
-#define GEN6_RC_CONTROL			0xA090
+#define GEN6_RC_VIDEO_FREQ		_MMIO(0xA00C)
+#define GEN6_RC_CONTROL			_MMIO(0xA090)
 #define   GEN6_RC_CTL_RC6pp_ENABLE	(1<<16)
 #define   GEN6_RC_CTL_RC6p_ENABLE	(1<<17)
 #define   GEN6_RC_CTL_RC6_ENABLE	(1<<18)
@@ -6782,16 +6862,16 @@ enum skl_disp_power_wells {
 #define   GEN7_RC_CTL_TO_MODE		(1<<28)
 #define   GEN6_RC_CTL_EI_MODE(x)	((x)<<27)
 #define   GEN6_RC_CTL_HW_ENABLE		(1<<31)
-#define GEN6_RP_DOWN_TIMEOUT		0xA010
-#define GEN6_RP_INTERRUPT_LIMITS	0xA014
-#define GEN6_RPSTAT1			0xA01C
+#define GEN6_RP_DOWN_TIMEOUT		_MMIO(0xA010)
+#define GEN6_RP_INTERRUPT_LIMITS	_MMIO(0xA014)
+#define GEN6_RPSTAT1			_MMIO(0xA01C)
 #define   GEN6_CAGF_SHIFT		8
 #define   HSW_CAGF_SHIFT		7
 #define   GEN9_CAGF_SHIFT		23
 #define   GEN6_CAGF_MASK		(0x7f << GEN6_CAGF_SHIFT)
 #define   HSW_CAGF_MASK			(0x7f << HSW_CAGF_SHIFT)
 #define   GEN9_CAGF_MASK		(0x1ff << GEN9_CAGF_SHIFT)
-#define GEN6_RP_CONTROL			0xA024
+#define GEN6_RP_CONTROL			_MMIO(0xA024)
 #define   GEN6_RP_MEDIA_TURBO		(1<<11)
 #define   GEN6_RP_MEDIA_MODE_MASK	(3<<9)
 #define   GEN6_RP_MEDIA_HW_TURBO_MODE	(3<<9)
@@ -6805,53 +6885,53 @@ enum skl_disp_power_wells {
 #define   GEN6_RP_UP_BUSY_CONT		(0x4<<3)
 #define   GEN6_RP_DOWN_IDLE_AVG		(0x2<<0)
 #define   GEN6_RP_DOWN_IDLE_CONT	(0x1<<0)
-#define GEN6_RP_UP_THRESHOLD		0xA02C
-#define GEN6_RP_DOWN_THRESHOLD		0xA030
-#define GEN6_RP_CUR_UP_EI		0xA050
+#define GEN6_RP_UP_THRESHOLD		_MMIO(0xA02C)
+#define GEN6_RP_DOWN_THRESHOLD		_MMIO(0xA030)
+#define GEN6_RP_CUR_UP_EI		_MMIO(0xA050)
 #define   GEN6_CURICONT_MASK		0xffffff
-#define GEN6_RP_CUR_UP			0xA054
+#define GEN6_RP_CUR_UP			_MMIO(0xA054)
 #define   GEN6_CURBSYTAVG_MASK		0xffffff
-#define GEN6_RP_PREV_UP			0xA058
-#define GEN6_RP_CUR_DOWN_EI		0xA05C
+#define GEN6_RP_PREV_UP			_MMIO(0xA058)
+#define GEN6_RP_CUR_DOWN_EI		_MMIO(0xA05C)
 #define   GEN6_CURIAVG_MASK		0xffffff
-#define GEN6_RP_CUR_DOWN		0xA060
-#define GEN6_RP_PREV_DOWN		0xA064
-#define GEN6_RP_UP_EI			0xA068
-#define GEN6_RP_DOWN_EI			0xA06C
-#define GEN6_RP_IDLE_HYSTERSIS		0xA070
-#define GEN6_RPDEUHWTC			0xA080
-#define GEN6_RPDEUC			0xA084
-#define GEN6_RPDEUCSW			0xA088
-#define GEN6_RC_STATE			0xA094
-#define GEN6_RC1_WAKE_RATE_LIMIT	0xA098
-#define GEN6_RC6_WAKE_RATE_LIMIT	0xA09C
-#define GEN6_RC6pp_WAKE_RATE_LIMIT	0xA0A0
-#define GEN6_RC_EVALUATION_INTERVAL	0xA0A8
-#define GEN6_RC_IDLE_HYSTERSIS		0xA0AC
-#define GEN6_RC_SLEEP			0xA0B0
-#define GEN6_RCUBMABDTMR		0xA0B0
-#define GEN6_RC1e_THRESHOLD		0xA0B4
-#define GEN6_RC6_THRESHOLD		0xA0B8
-#define GEN6_RC6p_THRESHOLD		0xA0BC
-#define VLV_RCEDATA			0xA0BC
-#define GEN6_RC6pp_THRESHOLD		0xA0C0
-#define GEN6_PMINTRMSK			0xA168
+#define GEN6_RP_CUR_DOWN		_MMIO(0xA060)
+#define GEN6_RP_PREV_DOWN		_MMIO(0xA064)
+#define GEN6_RP_UP_EI			_MMIO(0xA068)
+#define GEN6_RP_DOWN_EI			_MMIO(0xA06C)
+#define GEN6_RP_IDLE_HYSTERSIS		_MMIO(0xA070)
+#define GEN6_RPDEUHWTC			_MMIO(0xA080)
+#define GEN6_RPDEUC			_MMIO(0xA084)
+#define GEN6_RPDEUCSW			_MMIO(0xA088)
+#define GEN6_RC_STATE			_MMIO(0xA094)
+#define GEN6_RC1_WAKE_RATE_LIMIT	_MMIO(0xA098)
+#define GEN6_RC6_WAKE_RATE_LIMIT	_MMIO(0xA09C)
+#define GEN6_RC6pp_WAKE_RATE_LIMIT	_MMIO(0xA0A0)
+#define GEN6_RC_EVALUATION_INTERVAL	_MMIO(0xA0A8)
+#define GEN6_RC_IDLE_HYSTERSIS		_MMIO(0xA0AC)
+#define GEN6_RC_SLEEP			_MMIO(0xA0B0)
+#define GEN6_RCUBMABDTMR		_MMIO(0xA0B0)
+#define GEN6_RC1e_THRESHOLD		_MMIO(0xA0B4)
+#define GEN6_RC6_THRESHOLD		_MMIO(0xA0B8)
+#define GEN6_RC6p_THRESHOLD		_MMIO(0xA0BC)
+#define VLV_RCEDATA			_MMIO(0xA0BC)
+#define GEN6_RC6pp_THRESHOLD		_MMIO(0xA0C0)
+#define GEN6_PMINTRMSK			_MMIO(0xA168)
 #define   GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
-#define VLV_PWRDWNUPCTL			0xA294
-#define GEN9_MEDIA_PG_IDLE_HYSTERESIS	0xA0C4
-#define GEN9_RENDER_PG_IDLE_HYSTERESIS	0xA0C8
-#define GEN9_PG_ENABLE			0xA210
+#define VLV_PWRDWNUPCTL			_MMIO(0xA294)
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS	_MMIO(0xA0C4)
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS	_MMIO(0xA0C8)
+#define GEN9_PG_ENABLE			_MMIO(0xA210)
 #define   GEN9_RENDER_PG_ENABLE		(1<<0)
 #define   GEN9_MEDIA_PG_ENABLE		(1<<1)
 
-#define VLV_CHICKEN_3			(VLV_DISPLAY_BASE + 0x7040C)
+#define VLV_CHICKEN_3			_MMIO(VLV_DISPLAY_BASE + 0x7040C)
 #define  PIXEL_OVERLAP_CNT_MASK		(3 << 30)
 #define  PIXEL_OVERLAP_CNT_SHIFT	30
 
-#define GEN6_PMISR			0x44020
-#define GEN6_PMIMR			0x44024 /* rps_lock */
-#define GEN6_PMIIR			0x44028
-#define GEN6_PMIER			0x4402C
+#define GEN6_PMISR			_MMIO(0x44020)
+#define GEN6_PMIMR			_MMIO(0x44024) /* rps_lock */
+#define GEN6_PMIIR			_MMIO(0x44028)
+#define GEN6_PMIER			_MMIO(0x4402C)
 #define  GEN6_PM_MBOX_EVENT		(1<<25)
 #define  GEN6_PM_THERMAL_EVENT		(1<<24)
 #define  GEN6_PM_RP_DOWN_TIMEOUT	(1<<6)
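Editor's note: GEN6_RPSTAT1 reports the current actual GT frequency (CAGF) in a field whose position and width changed across generations, which is why three shift/mask pairs are defined above. A small sketch of the field extraction, with the masks restated locally and the raw register value passed in by the caller:

/* Sketch: extracting the current-actual-frequency (CAGF) field from
 * a GEN6_RPSTAT1 readout, using the per-generation shift/mask pairs. */
#define GEN6_CAGF_SHIFT	8
#define GEN6_CAGF_MASK	(0x7f << GEN6_CAGF_SHIFT)
#define GEN9_CAGF_SHIFT	23
#define GEN9_CAGF_MASK	(0x1ff << GEN9_CAGF_SHIFT)

static unsigned int cagf(unsigned int rpstat1, int gen)
{
	if (gen >= 9)
		return (rpstat1 & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	return (rpstat1 & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
}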
@@ -6863,30 +6943,30 @@ enum skl_disp_power_wells {
 				 GEN6_PM_RP_DOWN_THRESHOLD | \
 				 GEN6_PM_RP_DOWN_TIMEOUT)
 
-#define GEN7_GT_SCRATCH(i)		(0x4F100 + (i) * 4)
+#define GEN7_GT_SCRATCH(i)		_MMIO(0x4F100 + (i) * 4)
 #define GEN7_GT_SCRATCH_REG_NUM		8
 
-#define VLV_GTLC_SURVIVABILITY_REG	0x130098
+#define VLV_GTLC_SURVIVABILITY_REG	_MMIO(0x130098)
 #define VLV_GFX_CLK_STATUS_BIT		(1<<3)
 #define VLV_GFX_CLK_FORCE_ON_BIT	(1<<2)
 
-#define GEN6_GT_GFX_RC6_LOCKED		0x138104
-#define VLV_COUNTER_CONTROL		0x138104
+#define GEN6_GT_GFX_RC6_LOCKED		_MMIO(0x138104)
+#define VLV_COUNTER_CONTROL		_MMIO(0x138104)
 #define   VLV_COUNT_RANGE_HIGH		(1<<15)
 #define   VLV_MEDIA_RC0_COUNT_EN	(1<<5)
 #define   VLV_RENDER_RC0_COUNT_EN	(1<<4)
 #define   VLV_MEDIA_RC6_COUNT_EN	(1<<1)
 #define   VLV_RENDER_RC6_COUNT_EN	(1<<0)
-#define GEN6_GT_GFX_RC6			0x138108
-#define VLV_GT_RENDER_RC6		0x138108
-#define VLV_GT_MEDIA_RC6		0x13810C
+#define GEN6_GT_GFX_RC6			_MMIO(0x138108)
+#define VLV_GT_RENDER_RC6		_MMIO(0x138108)
+#define VLV_GT_MEDIA_RC6		_MMIO(0x13810C)
 
-#define GEN6_GT_GFX_RC6p		0x13810C
-#define GEN6_GT_GFX_RC6pp		0x138110
-#define VLV_RENDER_C0_COUNT		0x138118
-#define VLV_MEDIA_C0_COUNT		0x13811C
+#define GEN6_GT_GFX_RC6p		_MMIO(0x13810C)
+#define GEN6_GT_GFX_RC6pp		_MMIO(0x138110)
+#define VLV_RENDER_C0_COUNT		_MMIO(0x138118)
+#define VLV_MEDIA_C0_COUNT		_MMIO(0x13811C)
 
-#define GEN6_PCODE_MAILBOX		0x138124
+#define GEN6_PCODE_MAILBOX		_MMIO(0x138124)
 #define   GEN6_PCODE_READY		(1<<31)
 #define   GEN6_PCODE_WRITE_RC6VIDS	0x4
 #define   GEN6_PCODE_READ_RC6VIDS	0x5
@@ -6909,12 +6989,12 @@ enum skl_disp_power_wells {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ	0x17
 #define   DISPLAY_IPS_CONTROL		0x19
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
-#define GEN6_PCODE_DATA			0x138128
+#define GEN6_PCODE_DATA			_MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
-#define GEN6_PCODE_DATA1		0x13812C
+#define GEN6_PCODE_DATA1		_MMIO(0x13812C)
 
-#define GEN6_GT_CORE_STATUS		0x138060
+#define GEN6_GT_CORE_STATUS		_MMIO(0x138060)
 #define   GEN6_CORE_CPD_STATE_MASK	(7<<4)
 #define   GEN6_RCn_MASK			7
 #define   GEN6_RC0			0
@@ -6922,26 +7002,26 @@ enum skl_disp_power_wells {
 #define   GEN6_RC6			3
 #define   GEN6_RC7			4
 
-#define GEN8_GT_SLICE_INFO		0x138064
+#define GEN8_GT_SLICE_INFO		_MMIO(0x138064)
 #define   GEN8_LSLICESTAT_MASK		0x7
 
-#define CHV_POWER_SS0_SIG1		0xa720
-#define CHV_POWER_SS1_SIG1		0xa728
+#define CHV_POWER_SS0_SIG1		_MMIO(0xa720)
+#define CHV_POWER_SS1_SIG1		_MMIO(0xa728)
 #define   CHV_SS_PG_ENABLE		(1<<1)
 #define   CHV_EU08_PG_ENABLE		(1<<9)
 #define   CHV_EU19_PG_ENABLE		(1<<17)
 #define   CHV_EU210_PG_ENABLE		(1<<25)
 
-#define CHV_POWER_SS0_SIG2		0xa724
-#define CHV_POWER_SS1_SIG2		0xa72c
+#define CHV_POWER_SS0_SIG2		_MMIO(0xa724)
+#define CHV_POWER_SS1_SIG2		_MMIO(0xa72c)
 #define   CHV_EU311_PG_ENABLE		(1<<1)
 
-#define GEN9_SLICE_PGCTL_ACK(slice)	(0x804c + (slice)*0x4)
+#define GEN9_SLICE_PGCTL_ACK(slice)	_MMIO(0x804c + (slice)*0x4)
 #define   GEN9_PGCTL_SLICE_ACK		(1 << 0)
 #define   GEN9_PGCTL_SS_ACK(subslice)	(1 << (2 + (subslice)*2))
 
-#define GEN9_SS01_EU_PGCTL_ACK(slice)	(0x805c + (slice)*0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice)	(0x8060 + (slice)*0x8)
+#define GEN9_SS01_EU_PGCTL_ACK(slice)	_MMIO(0x805c + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice)	_MMIO(0x8060 + (slice)*0x8)
 #define   GEN9_PGCTL_SSA_EU08_ACK	(1 << 0)
 #define   GEN9_PGCTL_SSA_EU19_ACK	(1 << 2)
 #define   GEN9_PGCTL_SSA_EU210_ACK	(1 << 4)
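Editor's note: the GEN9 power-gate ack registers are computed per slice rather than listed per instance — each slice's copy of GEN9_SLICE_PGCTL_ACK sits 4 bytes above the last (8 bytes for the EU ack pair), and GEN9_PGCTL_SS_ACK() places one ack bit per subslice, two bits apart. A quick check of that arithmetic, with the macros restated locally:

/* Sketch: per-slice register and per-subslice bit arithmetic from above. */
#include <assert.h>

#define GEN9_SLICE_PGCTL_ACK(slice)	(0x804c + (slice) * 0x4)
#define GEN9_PGCTL_SS_ACK(subslice)	(1 << (2 + (subslice) * 2))

int main(void)
{
	assert(GEN9_SLICE_PGCTL_ACK(2) == 0x8054);
	assert(GEN9_PGCTL_SS_ACK(0) == 0x4);	/* bit 2 */
	assert(GEN9_PGCTL_SS_ACK(3) == 0x100);	/* bit 8 */
	return 0;
}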
@@ -6951,18 +7031,17 @@ enum skl_disp_power_wells {
 #define   GEN9_PGCTL_SSB_EU210_ACK	(1 << 12)
 #define   GEN9_PGCTL_SSB_EU311_ACK	(1 << 14)
 
-#define GEN7_MISCCPCTL			(0x9424)
+#define GEN7_MISCCPCTL			_MMIO(0x9424)
 #define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
 #define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE	(1<<2)
 #define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE	(1<<4)
 #define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE	(1<<6)
 
-#define GEN8_GARBCNTL			0xB004
+#define GEN8_GARBCNTL			_MMIO(0xB004)
 #define   GEN9_GAPS_TSV_CREDIT_DISABLE	(1<<7)
 
 /* IVYBRIDGE DPF */
-#define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
-#define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
+#define GEN7_L3CDERRST1(slice)		_MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
 #define   GEN7_PARITY_ERROR_VALID	(1<<13)
 #define   GEN7_L3CDERRST1_BANK_MASK	(3<<11)
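Editor's note: folding HSW_L3CDERRST11 into a parameterized GEN7_L3CDERRST1(slice) works because the slice-1 copy sits exactly 0x200 above the slice-0 copy (0xB208 = 0xB008 + 0x200); the same stride drives the GEN7_L3LOG() conversion in the next hunk. A one-line compile-time check:

/* Sketch: slice 1 lands on the offset of the removed HSW define. */
#define GEN7_L3CDERRST1(slice)	(0xB008 + (slice) * 0x200)
_Static_assert(GEN7_L3CDERRST1(1) == 0xB208, "matches old HSW_L3CDERRST11");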
@@ -6975,119 +7054,102 @@ enum skl_disp_power_wells {
 		((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
 #define GEN7_L3CDERRST1_ENABLE	(1<<7)
 
-#define GEN7_L3LOG_BASE			0xB070
-#define HSW_L3LOG_BASE_SLICE1		0xB270
+#define GEN7_L3LOG(slice, i)		_MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
 #define GEN7_L3LOG_SIZE			0x80
 
-#define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
-#define GEN7_HALF_SLICE_CHICKEN1_GT2	0xf100
+#define GEN7_HALF_SLICE_CHICKEN1	_MMIO(0xe100) /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2	_MMIO(0xf100)
 #define   GEN7_MAX_PS_THREAD_DEP		(8<<12)
 #define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE	(1<<10)
 #define   GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE	(1<<4)
 #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE	(1<<3)
 
-#define GEN9_HALF_SLICE_CHICKEN5	0xe188
+#define GEN9_HALF_SLICE_CHICKEN5	_MMIO(0xe188)
 #define   GEN9_DG_MIRROR_FIX_ENABLE	(1<<5)
 #define   GEN9_CCS_TLB_PREFETCH_ENABLE	(1<<3)
 
-#define GEN8_ROW_CHICKEN		0xe4f0
+#define GEN8_ROW_CHICKEN		_MMIO(0xe4f0)
 #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE	(1<<8)
 #define   STALL_DOP_GATING_DISABLE		(1<<5)
 
-#define GEN7_ROW_CHICKEN2		0xe4f4
-#define GEN7_ROW_CHICKEN2_GT2		0xf4f4
+#define GEN7_ROW_CHICKEN2		_MMIO(0xe4f4)
+#define GEN7_ROW_CHICKEN2_GT2		_MMIO(0xf4f4)
 #define   DOP_CLOCK_GATING_DISABLE	(1<<0)
 
-#define HSW_ROW_CHICKEN3		0xe49c
+#define HSW_ROW_CHICKEN3		_MMIO(0xe49c)
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE	(1 << 6)
 
-#define HALF_SLICE_CHICKEN2		0xe180
+#define HALF_SLICE_CHICKEN2		_MMIO(0xe180)
 #define   GEN8_ST_PO_DISABLE		(1<<13)
 
-#define HALF_SLICE_CHICKEN3		0xe184
+#define HALF_SLICE_CHICKEN3		_MMIO(0xe184)
 #define   HSW_SAMPLE_C_PERFORMANCE	(1<<9)
 #define   GEN8_CENTROID_PIXEL_OPT_DIS	(1<<8)
 #define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC	(1<<5)
 #define   GEN8_SAMPLER_POWER_BYPASS_DIS	(1<<1)
 
-#define GEN9_HALF_SLICE_CHICKEN7	0xe194
+#define GEN9_HALF_SLICE_CHICKEN7	_MMIO(0xe194)
 #define   GEN9_ENABLE_YV12_BUGFIX	(1<<4)
 
 /* Audio */
-#define G4X_AUD_VID_DID			(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID			_MMIO(dev_priv->info.display_mmio_offset + 0x62020)
 #define   INTEL_AUDIO_DEVCL		0x808629FB
 #define   INTEL_AUDIO_DEVBLC		0x80862801
 #define   INTEL_AUDIO_DEVCTG		0x80862802
 
-#define G4X_AUD_CNTL_ST			0x620B4
+#define G4X_AUD_CNTL_ST			_MMIO(0x620B4)
 #define   G4X_ELDV_DEVCL_DEVBLC		(1 << 13)
 #define   G4X_ELDV_DEVCTG		(1 << 14)
 #define   G4X_ELD_ADDR_MASK		(0xf << 5)
 #define   G4X_ELD_ACK			(1 << 4)
-#define G4X_HDMIW_HDMIEDID		0x6210C
+#define G4X_HDMIW_HDMIEDID		_MMIO(0x6210C)
 
 #define _IBX_HDMIW_HDMIEDID_A		0xE2050
 #define _IBX_HDMIW_HDMIEDID_B		0xE2150
-#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
-					_IBX_HDMIW_HDMIEDID_A, \
-					_IBX_HDMIW_HDMIEDID_B)
+#define IBX_HDMIW_HDMIEDID(pipe)	_MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
+						  _IBX_HDMIW_HDMIEDID_B)
 #define _IBX_AUD_CNTL_ST_A		0xE20B4
 #define _IBX_AUD_CNTL_ST_B		0xE21B4
-#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
-					_IBX_AUD_CNTL_ST_A, \
-					_IBX_AUD_CNTL_ST_B)
+#define IBX_AUD_CNTL_ST(pipe)		_MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
+						  _IBX_AUD_CNTL_ST_B)
 #define   IBX_ELD_BUFFER_SIZE_MASK	(0x1f << 10)
 #define   IBX_ELD_ADDRESS_MASK		(0x1f << 5)
 #define   IBX_ELD_ACK			(1 << 4)
-#define IBX_AUD_CNTL_ST2		0xE20C0
+#define IBX_AUD_CNTL_ST2		_MMIO(0xE20C0)
 #define   IBX_CP_READY(port)		((1 << 1) << (((port) - 1) * 4))
 #define   IBX_ELD_VALID(port)		((1 << 0) << (((port) - 1) * 4))
 
 #define _CPT_HDMIW_HDMIEDID_A		0xE5050
 #define _CPT_HDMIW_HDMIEDID_B		0xE5150
-#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
-					_CPT_HDMIW_HDMIEDID_A, \
-					_CPT_HDMIW_HDMIEDID_B)
+#define CPT_HDMIW_HDMIEDID(pipe)	_MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
 #define _CPT_AUD_CNTL_ST_A		0xE50B4
 #define _CPT_AUD_CNTL_ST_B		0xE51B4
-#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
-					_CPT_AUD_CNTL_ST_A, \
-					_CPT_AUD_CNTL_ST_B)
-#define CPT_AUD_CNTRL_ST2		0xE50C0
+#define CPT_AUD_CNTL_ST(pipe)		_MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2		_MMIO(0xE50C0)
 
 #define _VLV_HDMIW_HDMIEDID_A		(VLV_DISPLAY_BASE + 0x62050)
 #define _VLV_HDMIW_HDMIEDID_B		(VLV_DISPLAY_BASE + 0x62150)
-#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
-					_VLV_HDMIW_HDMIEDID_A, \
-					_VLV_HDMIW_HDMIEDID_B)
+#define VLV_HDMIW_HDMIEDID(pipe)	_MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
 #define _VLV_AUD_CNTL_ST_A		(VLV_DISPLAY_BASE + 0x620B4)
 #define _VLV_AUD_CNTL_ST_B		(VLV_DISPLAY_BASE + 0x621B4)
-#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
-					_VLV_AUD_CNTL_ST_A, \
-					_VLV_AUD_CNTL_ST_B)
-#define VLV_AUD_CNTL_ST2		(VLV_DISPLAY_BASE + 0x620C0)
+#define VLV_AUD_CNTL_ST(pipe)		_MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2		_MMIO(VLV_DISPLAY_BASE + 0x620C0)
 
 /* These are the 4 32-bit write offset registers for each stream
  * output buffer. It determines the offset from the
  * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
  */
-#define GEN7_SO_WRITE_OFFSET(n)		(0x5280 + (n) * 4)
+#define GEN7_SO_WRITE_OFFSET(n)		_MMIO(0x5280 + (n) * 4)
 
 #define _IBX_AUD_CONFIG_A		0xe2000
 #define _IBX_AUD_CONFIG_B		0xe2100
-#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
-					_IBX_AUD_CONFIG_A, \
-					_IBX_AUD_CONFIG_B)
+#define IBX_AUD_CFG(pipe)		_MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
 #define _CPT_AUD_CONFIG_A		0xe5000
 #define _CPT_AUD_CONFIG_B		0xe5100
-#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
-					_CPT_AUD_CONFIG_A, \
-					_CPT_AUD_CONFIG_B)
+#define CPT_AUD_CFG(pipe)		_MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
 #define _VLV_AUD_CONFIG_A		(VLV_DISPLAY_BASE + 0x62000)
 #define _VLV_AUD_CONFIG_B		(VLV_DISPLAY_BASE + 0x62100)
-#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
-					_VLV_AUD_CONFIG_A, \
-					_VLV_AUD_CONFIG_B)
+#define VLV_AUD_CFG(pipe)		_MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
 
 #define   AUD_CONFIG_N_VALUE_INDEX	(1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE	(1 << 28)
@@ -7112,72 +7174,62 @@ enum skl_disp_power_wells {
 /* HSW Audio */
 #define _HSW_AUD_CONFIG_A		0x65000
 #define _HSW_AUD_CONFIG_B		0x65100
-#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
-					_HSW_AUD_CONFIG_A, \
-					_HSW_AUD_CONFIG_B)
+#define HSW_AUD_CFG(pipe)		_MMIO_PIPE(pipe, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
 
 #define _HSW_AUD_MISC_CTRL_A		0x65010
 #define _HSW_AUD_MISC_CTRL_B		0x65110
-#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
-					_HSW_AUD_MISC_CTRL_A, \
-					_HSW_AUD_MISC_CTRL_B)
+#define HSW_AUD_MISC_CTRL(pipe)		_MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
 
 #define _HSW_AUD_DIP_ELD_CTRL_ST_A	0x650b4
 #define _HSW_AUD_DIP_ELD_CTRL_ST_B	0x651b4
-#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
-					_HSW_AUD_DIP_ELD_CTRL_ST_A, \
-					_HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define HSW_AUD_DIP_ELD_CTRL(pipe)	_MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
 
 /* Audio Digital Converter */
 #define _HSW_AUD_DIG_CNVT_1		0x65080
 #define _HSW_AUD_DIG_CNVT_2		0x65180
-#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
-					_HSW_AUD_DIG_CNVT_1, \
-					_HSW_AUD_DIG_CNVT_2)
+#define AUD_DIG_CNVT(pipe)		_MMIO_PIPE(pipe, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
 #define DIP_PORT_SEL_MASK		0x3
 
 #define _HSW_AUD_EDID_DATA_A		0x65050
 #define _HSW_AUD_EDID_DATA_B		0x65150
-#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
-					_HSW_AUD_EDID_DATA_A, \
-					_HSW_AUD_EDID_DATA_B)
+#define HSW_AUD_EDID_DATA(pipe)		_MMIO_PIPE(pipe, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
 
-#define HSW_AUD_PIPE_CONV_CFG		0x6507c
-#define HSW_AUD_PIN_ELD_CP_VLD		0x650c0
+#define HSW_AUD_PIPE_CONV_CFG		_MMIO(0x6507c)
+#define HSW_AUD_PIN_ELD_CP_VLD		_MMIO(0x650c0)
 #define   AUDIO_INACTIVE(trans)		((1 << 3) << ((trans) * 4))
 #define   AUDIO_OUTPUT_ENABLE(trans)	((1 << 2) << ((trans) * 4))
 #define   AUDIO_CP_READY(trans)		((1 << 1) << ((trans) * 4))
 #define   AUDIO_ELD_VALID(trans)	((1 << 0) << ((trans) * 4))
 
-#define HSW_AUD_CHICKENBIT		0x65f10
+#define HSW_AUD_CHICKENBIT		_MMIO(0x65f10)
 #define   SKL_AUD_CODEC_WAKE_SIGNAL	(1 << 15)
 
 /* HSW Power Wells */
-#define HSW_PWR_WELL_BIOS		0x45400 /* CTL1 */
-#define HSW_PWR_WELL_DRIVER		0x45404 /* CTL2 */
-#define HSW_PWR_WELL_KVMR		0x45408 /* CTL3 */
-#define HSW_PWR_WELL_DEBUG		0x4540C /* CTL4 */
+#define HSW_PWR_WELL_BIOS		_MMIO(0x45400) /* CTL1 */
+#define HSW_PWR_WELL_DRIVER		_MMIO(0x45404) /* CTL2 */
+#define HSW_PWR_WELL_KVMR		_MMIO(0x45408) /* CTL3 */
+#define HSW_PWR_WELL_DEBUG		_MMIO(0x4540C) /* CTL4 */
 #define   HSW_PWR_WELL_ENABLE_REQUEST	(1<<31)
 #define   HSW_PWR_WELL_STATE_ENABLED	(1<<30)
-#define HSW_PWR_WELL_CTL5		0x45410
+#define HSW_PWR_WELL_CTL5		_MMIO(0x45410)
 #define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1<<31)
 #define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
 #define   HSW_PWR_WELL_FORCE_ON		(1<<19)
-#define HSW_PWR_WELL_CTL6		0x45414
+#define HSW_PWR_WELL_CTL6		_MMIO(0x45414)
 
 /* SKL Fuse Status */
-#define SKL_FUSE_STATUS			0x42000
+#define SKL_FUSE_STATUS			_MMIO(0x42000)
 #define  SKL_FUSE_DOWNLOAD_STATUS	(1<<31)
 #define  SKL_FUSE_PG0_DIST_STATUS	(1<<27)
 #define  SKL_FUSE_PG1_DIST_STATUS	(1<<26)
 #define  SKL_FUSE_PG2_DIST_STATUS	(1<<25)
 
 /* Per-pipe DDI Function Control */
-#define TRANS_DDI_FUNC_CTL_A		0x60400
-#define TRANS_DDI_FUNC_CTL_B		0x61400
-#define TRANS_DDI_FUNC_CTL_C		0x62400
-#define TRANS_DDI_FUNC_CTL_EDP		0x6F400
-#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
+#define _TRANS_DDI_FUNC_CTL_A		0x60400
+#define _TRANS_DDI_FUNC_CTL_B		0x61400
+#define _TRANS_DDI_FUNC_CTL_C		0x62400
+#define _TRANS_DDI_FUNC_CTL_EDP		0x6F400
+#define TRANS_DDI_FUNC_CTL(tran)	_MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
 
 #define  TRANS_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
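Editor's note: TRANS_DDI_FUNC_CTL() takes only the transcoder-A offset because _TRANSCODER2() (and its new _MMIO_TRANS2() wrapper) rebases a register through a per-transcoder offset table rather than A/B stride math, which is what lets the EDP transcoder at 0x6F400 fit the same scheme. Roughly, as a sketch — the real macro pulls the offsets out of dev_priv->info, and the table values here are illustrative:

/* Sketch: table-based transcoder relocation in the style of _TRANSCODER2(). */
static const unsigned int trans_offsets[] = {
	[0] = 0x60000,	/* TRANSCODER_A */
	[1] = 0x61000,	/* TRANSCODER_B */
	[2] = 0x62000,	/* TRANSCODER_C */
	[3] = 0x6f000,	/* TRANSCODER_EDP */
};

static unsigned int trans2(int tran, unsigned int reg_a)
{
	return trans_offsets[tran] - trans_offsets[0] + reg_a;
}
/* trans2(3, 0x60400) == 0x6f400, the EDP instance of the register */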
@@ -7207,9 +7259,9 @@ enum skl_disp_power_wells {
 #define  TRANS_DDI_BFI_ENABLE		(1<<4)
 
 /* DisplayPort Transport Control */
-#define DP_TP_CTL_A			0x64040
-#define DP_TP_CTL_B			0x64140
-#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define _DP_TP_CTL_A			0x64040
+#define _DP_TP_CTL_B			0x64140
+#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
 #define  DP_TP_CTL_ENABLE		(1<<31)
 #define  DP_TP_CTL_MODE_SST		(0<<27)
 #define  DP_TP_CTL_MODE_MST		(1<<27)
@@ -7225,9 +7277,9 @@ enum skl_disp_power_wells {
 #define  DP_TP_CTL_SCRAMBLE_DISABLE	(1<<7)
 
 /* DisplayPort Transport Status */
-#define DP_TP_STATUS_A			0x64044
-#define DP_TP_STATUS_B			0x64144
-#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define _DP_TP_STATUS_A			0x64044
+#define _DP_TP_STATUS_B			0x64144
+#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
 #define  DP_TP_STATUS_IDLE_DONE		(1<<25)
 #define  DP_TP_STATUS_ACT_SENT		(1<<24)
 #define  DP_TP_STATUS_MODE_STATUS_MST	(1<<23)
@@ -7237,9 +7289,9 @@ enum skl_disp_power_wells {
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC0	(3 << 0)
 
 /* DDI Buffer Control */
-#define DDI_BUF_CTL_A			0x64000
-#define DDI_BUF_CTL_B			0x64100
-#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define _DDI_BUF_CTL_A			0x64000
+#define _DDI_BUF_CTL_B			0x64100
+#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
 #define  DDI_BUF_CTL_ENABLE		(1<<31)
 #define  DDI_BUF_TRANS_SELECT(n)	((n) << 24)
 #define  DDI_BUF_EMP_MASK		(0xf<<24)
@@ -7252,17 +7304,17 @@ enum skl_disp_power_wells {
 #define  DDI_INIT_DISPLAY_DETECTED	(1<<0)
 
 /* DDI Buffer Translations */
-#define DDI_BUF_TRANS_A			0x64E00
-#define DDI_BUF_TRANS_B			0x64E60
-#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
-#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
+#define _DDI_BUF_TRANS_A		0x64E00
+#define _DDI_BUF_TRANS_B		0x64E60
+#define DDI_BUF_TRANS_LO(port, i)	_MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_TRANS_HI(port, i)	_MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
  * SBI_ADDR, which contains the register offset; and SBI_DATA,
  * which contains the payload */
-#define SBI_ADDR			0xC6000
-#define SBI_DATA			0xC6004
-#define SBI_CTL_STAT			0xC6008
+#define SBI_ADDR			_MMIO(0xC6000)
+#define SBI_DATA			_MMIO(0xC6004)
+#define SBI_CTL_STAT			_MMIO(0xC6008)
 #define  SBI_CTL_DEST_ICLK		(0x0<<16)
 #define  SBI_CTL_DEST_MPHY		(0x1<<16)
 #define  SBI_CTL_OP_IORD		(0x2<<8)
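Editor's note: the SBI comment describes a classic indirect-register window: the target offset goes into SBI_ADDR, an opcode plus a busy bit into SBI_CTL_STAT, and the payload moves through SBI_DATA once the busy bit clears. A simplified sketch of a read under those assumptions — I915_READ/I915_WRITE and udelay stand in for the driver's helpers, and an SBI_BUSY flag bit is assumed; the real sequence lives in the driver's sideband code and may differ in detail:

/* Sketch (illustrative only): indirect SBI read per the comment above. */
static int sbi_read(unsigned int offset, unsigned int *val)
{
	int timeout = 100;

	I915_WRITE(SBI_ADDR, offset << 16);	/* target register offset */
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_DEST_ICLK | SBI_CTL_OP_IORD |
		   SBI_BUSY);			/* kick off the transaction */

	while ((I915_READ(SBI_CTL_STAT) & SBI_BUSY) && --timeout)
		udelay(1);			/* wait for completion */
	if (!timeout)
		return -1;			/* transaction never finished */

	*val = I915_READ(SBI_DATA);		/* fetch the payload */
	return 0;
}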
@@ -7293,12 +7345,12 @@ enum skl_disp_power_wells {
 #define  SBI_GEN0_CFG_BUFFENABLE_DISABLE	(1<<0)
 
 /* LPT PIXCLK_GATE */
-#define PIXCLK_GATE			0xC6020
+#define PIXCLK_GATE			_MMIO(0xC6020)
 #define  PIXCLK_GATE_UNGATE		(1<<0)
 #define  PIXCLK_GATE_GATE		(0<<0)
 
 /* SPLL */
-#define SPLL_CTL			0x46020
+#define SPLL_CTL			_MMIO(0x46020)
 #define  SPLL_PLL_ENABLE		(1<<31)
 #define  SPLL_PLL_SSC			(1<<28)
 #define  SPLL_PLL_NON_SSC		(2<<28)
@@ -7310,9 +7362,9 @@ enum skl_disp_power_wells {
 #define  SPLL_PLL_FREQ_MASK		(3<<26)
 
 /* WRPLL */
-#define WRPLL_CTL1			0x46040
-#define WRPLL_CTL2			0x46060
-#define WRPLL_CTL(pll)			(pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
+#define _WRPLL_CTL1			0x46040
+#define _WRPLL_CTL2			0x46060
+#define WRPLL_CTL(pll)			_MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
 #define  WRPLL_PLL_ENABLE		(1<<31)
 #define  WRPLL_PLL_SSC			(1<<28)
 #define  WRPLL_PLL_NON_SSC		(2<<28)
@@ -7329,9 +7381,9 @@ enum skl_disp_power_wells {
 #define  WRPLL_DIVIDER_FB_MASK		(0xff<<16)
 
 /* Port clock selection */
-#define PORT_CLK_SEL_A			0x46100
-#define PORT_CLK_SEL_B			0x46104
-#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
+#define _PORT_CLK_SEL_A			0x46100
+#define _PORT_CLK_SEL_B			0x46104
+#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
 #define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
 #define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
 #define  PORT_CLK_SEL_LCPLL_810		(2<<29)
@@ -7343,18 +7395,18 @@ enum skl_disp_power_wells {
 #define  PORT_CLK_SEL_MASK		(7<<29)
 
 /* Transcoder clock selection */
-#define TRANS_CLK_SEL_A			0x46140
-#define TRANS_CLK_SEL_B			0x46144
-#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+#define _TRANS_CLK_SEL_A		0x46140
+#define _TRANS_CLK_SEL_B		0x46144
+#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
 /* For each transcoder, we need to select the corresponding port clock */
 #define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
 #define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
 
-#define TRANSA_MSA_MISC			0x60410
-#define TRANSB_MSA_MISC			0x61410
-#define TRANSC_MSA_MISC			0x62410
-#define TRANS_EDP_MSA_MISC		0x6f410
-#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
+#define _TRANSA_MSA_MISC		0x60410
+#define _TRANSB_MSA_MISC		0x61410
+#define _TRANSC_MSA_MISC		0x62410
+#define _TRANS_EDP_MSA_MISC		0x6f410
+#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
 
 #define  TRANS_MSA_SYNC_CLK		(1<<0)
 #define  TRANS_MSA_6_BPC		(0<<5)
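Editor's note: the TRANS_CLK_SEL_PORT(x) encoding reserves field value 0 for "clock disabled", so port x is stored as x+1 in the top three bits. Restated for clarity:

/* Sketch: port-to-field encoding for TRANS_CLK_SEL; value 0 = disabled. */
#define TRANS_CLK_SEL_DISABLED	(0x0 << 29)
#define TRANS_CLK_SEL_PORT(x)	(((x) + 1) << 29)
/* e.g. port 2 is encoded as field value 3 in bits 31:29 */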
@@ -7364,7 +7416,7 @@ enum skl_disp_power_wells {
 #define  TRANS_MSA_16_BPC		(4<<5)
 
 /* LCPLL Control */
-#define LCPLL_CTL			0x130040
+#define LCPLL_CTL			_MMIO(0x130040)
 #define  LCPLL_PLL_DISABLE		(1<<31)
 #define  LCPLL_PLL_LOCK			(1<<30)
 #define  LCPLL_CLK_FREQ_MASK		(3<<26)
@@ -7384,7 +7436,7 @@ enum skl_disp_power_wells {
  */
 
 /* CDCLK_CTL */
-#define CDCLK_CTL			0x46000
+#define CDCLK_CTL			_MMIO(0x46000)
 #define  CDCLK_FREQ_SEL_MASK		(3<<26)
 #define  CDCLK_FREQ_450_432		(0<<26)
 #define  CDCLK_FREQ_540			(1<<26)
@@ -7400,12 +7452,12 @@ enum skl_disp_power_wells {
 #define  BXT_CDCLK_SSA_PRECHARGE_ENABLE	(1<<16)
 
 /* LCPLL_CTL */
-#define LCPLL1_CTL		0x46010
-#define LCPLL2_CTL		0x46014
+#define LCPLL1_CTL		_MMIO(0x46010)
+#define LCPLL2_CTL		_MMIO(0x46014)
 #define  LCPLL_PLL_ENABLE	(1<<31)
 
 /* DPLL control1 */
-#define DPLL_CTRL1		0x6C058
+#define DPLL_CTRL1		_MMIO(0x6C058)
 #define  DPLL_CTRL1_HDMI_MODE(id)	(1<<((id)*6+5))
 #define  DPLL_CTRL1_SSC(id)		(1<<((id)*6+4))
 #define  DPLL_CTRL1_LINK_RATE_MASK(id)	(7<<((id)*6+1))
@@ -7420,7 +7472,7 @@ enum skl_disp_power_wells {
 #define  DPLL_CTRL1_LINK_RATE_2160	5
 
 /* DPLL control2 */
-#define DPLL_CTRL2		0x6C05C
+#define DPLL_CTRL2		_MMIO(0x6C05C)
 #define  DPLL_CTRL2_DDI_CLK_OFF(port)		(1<<((port)+15))
 #define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)	(3<<((port)*3+1))
 #define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)	((port)*3+1)
@@ -7428,21 +7480,21 @@ enum skl_disp_power_wells {
 #define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)	(1<<((port)*3))
 
 /* DPLL Status */
-#define DPLL_STATUS	0x6C060
+#define DPLL_STATUS	_MMIO(0x6C060)
 #define  DPLL_LOCK(id)	(1<<((id)*8))
 
 /* DPLL cfg */
-#define DPLL1_CFGCR1	0x6C040
-#define DPLL2_CFGCR1	0x6C048
-#define DPLL3_CFGCR1	0x6C050
+#define _DPLL1_CFGCR1	0x6C040
+#define _DPLL2_CFGCR1	0x6C048
+#define _DPLL3_CFGCR1	0x6C050
 #define  DPLL_CFGCR1_FREQ_ENABLE	(1<<31)
 #define  DPLL_CFGCR1_DCO_FRACTION_MASK	(0x7fff<<9)
 #define  DPLL_CFGCR1_DCO_FRACTION(x)	((x)<<9)
 #define  DPLL_CFGCR1_DCO_INTEGER_MASK	(0x1ff)
 
-#define DPLL1_CFGCR2	0x6C044
-#define DPLL2_CFGCR2	0x6C04C
-#define DPLL3_CFGCR2	0x6C054
+#define _DPLL1_CFGCR2	0x6C044
+#define _DPLL2_CFGCR2	0x6C04C
+#define _DPLL3_CFGCR2	0x6C054
 #define  DPLL_CFGCR2_QDIV_RATIO_MASK	(0xff<<8)
 #define  DPLL_CFGCR2_QDIV_RATIO(x)	((x)<<8)
 #define  DPLL_CFGCR2_QDIV_MODE(x)	((x)<<7)
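Editor's note: the per-DPLL CFGCR registers are 8 bytes apart, so the next hunk can express the old `base + ((id) - SKL_DPLL1) * 8` arithmetic through _MMIO_PIPE() — the A/B delta is exactly the stride. That only holds when the second anchor is the DPLL2 copy of the *same* register; the hunk's DPLL_CFGCR1() as printed pairs _DPLL1_CFGCR1 with _DPLL2_CFGCR2 (a 0xc delta), which looks like a typo for _DPLL2_CFGCR1. A check of the intended arithmetic:

/* Sketch: _PIPE() index math reproduces the old "+ id * 8" stride. */
#include <assert.h>

#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _DPLL1_CFGCR1	0x6C040
#define _DPLL2_CFGCR1	0x6C048

int main(void)
{
	/* (id - SKL_DPLL1) in {0,1,2} selects the DPLL1..DPLL3 copies */
	assert(_PIPE(0, _DPLL1_CFGCR1, _DPLL2_CFGCR1) == 0x6C040);
	assert(_PIPE(2, _DPLL1_CFGCR1, _DPLL2_CFGCR1) == 0x6C050);
	return 0;
}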
@@ -7460,58 +7512,59 @@ enum skl_disp_power_wells {
 #define  DPLL_CFGCR2_PDIV_7 (4<<2)
 #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK	(3)
 
-#define DPLL_CFGCR1(id)	(DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
-#define DPLL_CFGCR2(id)	(DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
+#define DPLL_CFGCR1(id)	_MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
+#define DPLL_CFGCR2(id)	_MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
 /* BXT display engine PLL */
-#define BXT_DE_PLL_CTL			0x6d000
+#define BXT_DE_PLL_CTL			_MMIO(0x6d000)
 #define   BXT_DE_PLL_RATIO(x)		(x)	/* {60,65,100} * 19.2MHz */
 #define   BXT_DE_PLL_RATIO_MASK		0xff
 
-#define BXT_DE_PLL_ENABLE		0x46070
+#define BXT_DE_PLL_ENABLE		_MMIO(0x46070)
 #define   BXT_DE_PLL_PLL_ENABLE		(1 << 31)
 #define   BXT_DE_PLL_LOCK		(1 << 30)
 
 /* GEN9 DC */
-#define DC_STATE_EN			0x45504
+#define DC_STATE_EN			_MMIO(0x45504)
+#define  DC_STATE_DISABLE		0
 #define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_DC9		(1<<3)
 #define  DC_STATE_EN_UPTO_DC6		(2<<0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK	0x3
 
-#define  DC_STATE_DEBUG			0x45520
+#define  DC_STATE_DEBUG			_MMIO(0x45520)
 #define  DC_STATE_DEBUG_MASK_MEMORY_UP	(1<<1)
 
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  * since on HSW we can't write to it using I915_WRITE. */
-#define D_COMP_HSW			(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
-#define D_COMP_BDW			0x138144
+#define D_COMP_HSW			_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW			_MMIO(0x138144)
 #define  D_COMP_RCOMP_IN_PROGRESS	(1<<9)
 #define  D_COMP_COMP_FORCE		(1<<8)
 #define  D_COMP_COMP_DISABLE		(1<<0)
 
 /* Pipe WM_LINETIME - watermark line time */
-#define PIPE_WM_LINETIME_A		0x45270
-#define PIPE_WM_LINETIME_B		0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
-				   PIPE_WM_LINETIME_B)
+#define _PIPE_WM_LINETIME_A		0x45270
+#define _PIPE_WM_LINETIME_B		0x45274
+#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
 #define   PIPE_WM_LINETIME_MASK			(0x1ff)
 #define   PIPE_WM_LINETIME_TIME(x)		((x))
 #define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
 #define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
 
 /* SFUSE_STRAP */
-#define SFUSE_STRAP			0xc2014
+#define SFUSE_STRAP			_MMIO(0xc2014)
 #define  SFUSE_STRAP_FUSE_LOCK		(1<<13)
 #define  SFUSE_STRAP_DISPLAY_DISABLED	(1<<7)
+#define  SFUSE_STRAP_CRT_DISABLED	(1<<6)
 #define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
 
-#define WM_MISC				0x45260
+#define WM_MISC				_MMIO(0x45260)
 #define  WM_MISC_DATA_PARTITION_5_6	(1 << 0)
 
-#define WM_DBG				0x45280
+#define WM_DBG				_MMIO(0x45280)
 #define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0)
 #define  WM_DBG_DISALLOW_MAXFIFO	(1<<1)
 #define  WM_DBG_DISALLOW_SPRITE		(1<<2)
@@ -7548,28 +7601,29 @@ enum skl_disp_power_wells {
7548#define _PIPE_B_CSC_POSTOFF_ME 0x49144 7601#define _PIPE_B_CSC_POSTOFF_ME 0x49144
7549#define _PIPE_B_CSC_POSTOFF_LO 0x49148 7602#define _PIPE_B_CSC_POSTOFF_LO 0x49148
7550 7603
7551#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) 7604#define PIPE_CSC_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
7552#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) 7605#define PIPE_CSC_COEFF_BY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
7553#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) 7606#define PIPE_CSC_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
7554#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) 7607#define PIPE_CSC_COEFF_BU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
7555#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) 7608#define PIPE_CSC_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
7556#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) 7609#define PIPE_CSC_COEFF_BV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
7557#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) 7610#define PIPE_CSC_MODE(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
7558#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) 7611#define PIPE_CSC_PREOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
7559#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) 7612#define PIPE_CSC_PREOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
7560#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) 7613#define PIPE_CSC_PREOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
7561#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI) 7614#define PIPE_CSC_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
7562#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) 7615#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
7563#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) 7616#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
7564 7617
7565/* MIPI DSI registers */ 7618/* MIPI DSI registers */
7566 7619
7567#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ 7620#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
7621#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
7568 7622
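[Editor's note] _MMIO_MIPI is a small convenience wrapper so every per-port MIPI register below yields a typed register in one step. For illustration only, _PORT3 can be thought of as indexing a three-entry table by port number, with the unused port B slot zeroed; this expansion is a hypothetical sketch, not the literal upstream definition:

	/* hypothetical expansion, for illustration only */
	#define _PORT3(port, a, b, c) (((const u32 []){ (a), (b), (c) })[(port)])

	/* so MIPI_PORT_CTRL(PORT_C) ends up as _MMIO(_MIPIC_PORT_CTRL) */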
7569/* BXT MIPI clock controls */ 7623/* BXT MIPI clock controls */
7570#define BXT_MAX_VAR_OUTPUT_KHZ 39500 7624#define BXT_MAX_VAR_OUTPUT_KHZ 39500
7571 7625
7572#define BXT_MIPI_CLOCK_CTL 0x46090 7626#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090)
7573#define BXT_MIPI1_DIV_SHIFT 26 7627#define BXT_MIPI1_DIV_SHIFT 26
7574#define BXT_MIPI2_DIV_SHIFT 10 7628#define BXT_MIPI2_DIV_SHIFT 10
7575#define BXT_MIPI_DIV_SHIFT(port) \ 7629#define BXT_MIPI_DIV_SHIFT(port) \
@@ -7631,20 +7685,20 @@ enum skl_disp_power_wells {
7631/* BXT MIPI mode configure */ 7685/* BXT MIPI mode configure */
7632#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 7686#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
7633#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 7687#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
7634#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \ 7688#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
7635 _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) 7689 _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
7636 7690
7637#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC 7691#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
7638#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC 7692#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
7639#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \ 7693#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
7640 _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) 7694 _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
7641 7695
7642#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100 7696#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
7643#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900 7697#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
7644#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \ 7698#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
7645 _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) 7699 _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
7646 7700
7647#define BXT_DSI_PLL_CTL 0x161000 7701#define BXT_DSI_PLL_CTL _MMIO(0x161000)
7648#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16 7702#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
7649#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT) 7703#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
7650#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT) 7704#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
@@ -7660,21 +7714,20 @@ enum skl_disp_power_wells {
7660#define BXT_DSI_PLL_RATIO_MAX 0x7D 7714#define BXT_DSI_PLL_RATIO_MAX 0x7D
7661#define BXT_DSI_PLL_RATIO_MIN 0x22 7715#define BXT_DSI_PLL_RATIO_MIN 0x22
7662#define BXT_DSI_PLL_RATIO_MASK 0xFF 7716#define BXT_DSI_PLL_RATIO_MASK 0xFF
7663#define BXT_REF_CLOCK_KHZ 19500 7717#define BXT_REF_CLOCK_KHZ 19200
7664 7718
7665#define BXT_DSI_PLL_ENABLE 0x46080 7719#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
7666#define BXT_DSI_PLL_DO_ENABLE (1 << 31) 7720#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
7667#define BXT_DSI_PLL_LOCKED (1 << 30) 7721#define BXT_DSI_PLL_LOCKED (1 << 30)
7668 7722
7669#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) 7723#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
7670#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) 7724#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
7671#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) 7725#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
7672 7726
7673 /* BXT port control */ 7727 /* BXT port control */
7674#define _BXT_MIPIA_PORT_CTRL 0x6B0C0 7728#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
7675#define _BXT_MIPIC_PORT_CTRL 0x6B8C0 7729#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
7676#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) 7730#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
7678 7731
7679#define DPI_ENABLE (1 << 31) /* A + C */ 7732#define DPI_ENABLE (1 << 31) /* A + C */
7680#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 7733#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
@@ -7718,8 +7771,7 @@ enum skl_disp_power_wells {
7718 7771
7719#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) 7772#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
7720#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) 7773#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
7721#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) 7774#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
7723#define TEARING_EFFECT_DELAY_SHIFT 0 7775#define TEARING_EFFECT_DELAY_SHIFT 0
7724#define TEARING_EFFECT_DELAY_MASK (0xffff << 0) 7776#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
7725 7777
@@ -7730,8 +7782,7 @@ enum skl_disp_power_wells {
7730 7782
7731#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) 7783#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
7732#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) 7784#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
7733#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) 7785#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
7735#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ 7786#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
7736#define ULPS_STATE_MASK (3 << 1) 7787#define ULPS_STATE_MASK (3 << 1)
7737#define ULPS_STATE_ENTER (2 << 1) 7788#define ULPS_STATE_ENTER (2 << 1)
@@ -7741,12 +7792,10 @@ enum skl_disp_power_wells {
7741 7792
7742#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) 7793#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
7743#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) 7794#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
7744#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) 7795#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
7746#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) 7796#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
7747#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) 7797#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
7748#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) 7798#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
7750#define TEARING_EFFECT (1 << 31) 7799#define TEARING_EFFECT (1 << 31)
7751#define SPL_PKT_SENT_INTERRUPT (1 << 30) 7800#define SPL_PKT_SENT_INTERRUPT (1 << 30)
7752#define GEN_READ_DATA_AVAIL (1 << 29) 7801#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -7782,8 +7831,7 @@ enum skl_disp_power_wells {
7782 7831
7783#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) 7832#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
7784#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) 7833#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
7785#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) 7834#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
7787#define CMD_MODE_DATA_WIDTH_MASK (7 << 13) 7835#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
7788#define CMD_MODE_NOT_SUPPORTED (0 << 13) 7836#define CMD_MODE_NOT_SUPPORTED (0 << 13)
7789#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) 7837#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -7806,32 +7854,27 @@ enum skl_disp_power_wells {
7806 7854
7807#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) 7855#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
7808#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) 7856#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
7809#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) 7857#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
7811#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff 7858#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
7812 7859
7813#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) 7860#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
7814#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) 7861#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
7815#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) 7862#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
7817#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff 7863#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
7818 7864
7819#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) 7865#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
7820#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) 7866#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
7821#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) 7867#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
7823#define TURN_AROUND_TIMEOUT_MASK 0x3f 7868#define TURN_AROUND_TIMEOUT_MASK 0x3f
7824 7869
7825#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) 7870#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
7826#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) 7871#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
7827#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) 7872#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
7829#define DEVICE_RESET_TIMER_MASK 0xffff 7873#define DEVICE_RESET_TIMER_MASK 0xffff
7830 7874
7831#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) 7875#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
7832#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) 7876#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
7833#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) 7877#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
7835#define VERTICAL_ADDRESS_SHIFT 16 7878#define VERTICAL_ADDRESS_SHIFT 16
7836#define VERTICAL_ADDRESS_MASK (0xffff << 16) 7879#define VERTICAL_ADDRESS_MASK (0xffff << 16)
7837#define HORIZONTAL_ADDRESS_SHIFT 0 7880#define HORIZONTAL_ADDRESS_SHIFT 0
@@ -7839,8 +7882,7 @@ enum skl_disp_power_wells {
7839 7882
7840#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) 7883#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
7841#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) 7884#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
7842#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) 7885#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
7844#define DBI_FIFO_EMPTY_HALF (0 << 0) 7886#define DBI_FIFO_EMPTY_HALF (0 << 0)
7845#define DBI_FIFO_EMPTY_QUARTER (1 << 0) 7887#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
7846#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) 7888#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
@@ -7848,50 +7890,41 @@ enum skl_disp_power_wells {
7848/* regs below are bits 15:0 */ 7890/* regs below are bits 15:0 */
7849#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) 7891#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
7850#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) 7892#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
7851#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) 7893#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
7853 7894
7854#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) 7895#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
7855#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) 7896#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
7856#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) 7897#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
7858 7898
7859#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) 7899#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
7860#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) 7900#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
7861#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) 7901#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
7863 7902
7864#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) 7903#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
7865#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) 7904#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
7866#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) 7905#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
7868 7906
7869#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) 7907#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
7870#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) 7908#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
7871#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) 7909#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
7873 7910
7874#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) 7911#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
7875#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) 7912#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
7876#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) 7913#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
7878 7914
7879#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) 7915#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
7880#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) 7916#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
7881#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) 7917#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
7883 7918
7884#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) 7919#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
7885#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) 7920#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
7886#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) 7921#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
7888 7922
7889/* regs above are bits 15:0 */ 7923/* regs above are bits 15:0 */
7890 7924
7891#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) 7925#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
7892#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) 7926#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
7893#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) 7927#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
7895#define DPI_LP_MODE (1 << 6) 7928#define DPI_LP_MODE (1 << 6)
7896#define BACKLIGHT_OFF (1 << 5) 7929#define BACKLIGHT_OFF (1 << 5)
7897#define BACKLIGHT_ON (1 << 4) 7930#define BACKLIGHT_ON (1 << 4)
@@ -7902,29 +7935,26 @@ enum skl_disp_power_wells {
7902 7935
7903#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) 7936#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
7904#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) 7937#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
7905#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) 7938#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
7907#define COMMAND_BYTE_SHIFT 0 7939#define COMMAND_BYTE_SHIFT 0
7908#define COMMAND_BYTE_MASK (0x3f << 0) 7940#define COMMAND_BYTE_MASK (0x3f << 0)
7909 7941
7910#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) 7942#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
7911#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) 7943#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
7912#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) 7944#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
7914#define MASTER_INIT_TIMER_SHIFT 0 7945#define MASTER_INIT_TIMER_SHIFT 0
7915#define MASTER_INIT_TIMER_MASK (0xffff << 0) 7946#define MASTER_INIT_TIMER_MASK (0xffff << 0)
7916 7947
7917#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) 7948#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
7918#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) 7949#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
7919#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \ 7950#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
7920 _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) 7951 _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
7921#define MAX_RETURN_PKT_SIZE_SHIFT 0 7952#define MAX_RETURN_PKT_SIZE_SHIFT 0
7922#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) 7953#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
7923 7954
7924#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) 7955#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
7925#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) 7956#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
7926#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) 7957#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
7928#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) 7958#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
7929#define DISABLE_VIDEO_BTA (1 << 3) 7959#define DISABLE_VIDEO_BTA (1 << 3)
7930#define IP_TG_CONFIG (1 << 2) 7960#define IP_TG_CONFIG (1 << 2)
@@ -7934,8 +7964,7 @@ enum skl_disp_power_wells {
7934 7964
7935#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) 7965#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
7936#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) 7966#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
7937#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) 7967#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
7939#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) 7968#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
7940#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) 7969#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
7941#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) 7970#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -7947,31 +7976,26 @@ enum skl_disp_power_wells {
7947 7976
7948#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) 7977#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
7949#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) 7978#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
7950#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) 7979#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
7952#define LP_BYTECLK_SHIFT 0 7980#define LP_BYTECLK_SHIFT 0
7953#define LP_BYTECLK_MASK (0xffff << 0) 7981#define LP_BYTECLK_MASK (0xffff << 0)
7954 7982
7955/* bits 31:0 */ 7983/* bits 31:0 */
7956#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) 7984#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
7957#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) 7985#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
7958#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) 7986#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
7960 7987
7961/* bits 31:0 */ 7988/* bits 31:0 */
7962#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) 7989#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
7963#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) 7990#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
7964#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) 7991#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
7966 7992
7967#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) 7993#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
7968#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) 7994#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
7969#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) 7995#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
7971#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) 7996#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
7972#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) 7997#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
7973#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) 7998#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
7975#define LONG_PACKET_WORD_COUNT_SHIFT 8 7999#define LONG_PACKET_WORD_COUNT_SHIFT 8
7976#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) 8000#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
7977#define SHORT_PACKET_PARAM_SHIFT 8 8001#define SHORT_PACKET_PARAM_SHIFT 8
@@ -7984,8 +8008,7 @@ enum skl_disp_power_wells {
7984 8008
7985#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) 8009#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
7986#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) 8010#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
7987#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) 8011#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
7989#define DPI_FIFO_EMPTY (1 << 28) 8012#define DPI_FIFO_EMPTY (1 << 28)
7990#define DBI_FIFO_EMPTY (1 << 27) 8013#define DBI_FIFO_EMPTY (1 << 27)
7991#define LP_CTRL_FIFO_EMPTY (1 << 26) 8014#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -8003,16 +8026,14 @@ enum skl_disp_power_wells {
8003 8026
8004#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) 8027#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
8005#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) 8028#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
8006#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) 8029#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
8008#define DBI_HS_LP_MODE_MASK (1 << 0) 8030#define DBI_HS_LP_MODE_MASK (1 << 0)
8009#define DBI_LP_MODE (1 << 0) 8031#define DBI_LP_MODE (1 << 0)
8010#define DBI_HS_MODE (0 << 0) 8032#define DBI_HS_MODE (0 << 0)
8011 8033
8012#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) 8034#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
8013#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) 8035#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
8014#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) 8036#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
8016#define EXIT_ZERO_COUNT_SHIFT 24 8037#define EXIT_ZERO_COUNT_SHIFT 24
8017#define EXIT_ZERO_COUNT_MASK (0x3f << 24) 8038#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
8018#define TRAIL_COUNT_SHIFT 16 8039#define TRAIL_COUNT_SHIFT 16
@@ -8025,15 +8046,11 @@ enum skl_disp_power_wells {
8025/* bits 31:0 */ 8046/* bits 31:0 */
8026#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) 8047#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
8027#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) 8048#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
8028#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) 8049#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
8030 8050
8031#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088) 8051#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
8033#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888) 8052#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
8035#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) 8053#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
8037#define LP_HS_SSW_CNT_SHIFT 16 8054#define LP_HS_SSW_CNT_SHIFT 16
8038#define LP_HS_SSW_CNT_MASK (0xffff << 16) 8055#define LP_HS_SSW_CNT_MASK (0xffff << 16)
8039#define HS_LP_PWR_SW_CNT_SHIFT 0 8056#define HS_LP_PWR_SW_CNT_SHIFT 0
@@ -8041,19 +8058,16 @@ enum skl_disp_power_wells {
8041 8058
8042#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) 8059#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
8043#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) 8060#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
8044#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) 8061#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
8046#define STOP_STATE_STALL_COUNTER_SHIFT 0 8062#define STOP_STATE_STALL_COUNTER_SHIFT 0
8047#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) 8063#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
8048 8064
8049#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) 8065#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
8050#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) 8066#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
8051#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) 8067#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
8053#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) 8068#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
8054#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) 8069#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
8055#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) 8070#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
8057#define RX_CONTENTION_DETECTED (1 << 0) 8071#define RX_CONTENTION_DETECTED (1 << 0)
8058 8072
8059/* XXX: only pipe A ?!? */ 8073/* XXX: only pipe A ?!? */
@@ -8073,8 +8087,7 @@ enum skl_disp_power_wells {
8073 8087
8074#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) 8088#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
8075#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) 8089#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
8076#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, _MIPIC_CTRL) 8090#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
8078#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ 8091#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
8079#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) 8092#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
8080#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) 8093#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -8093,23 +8106,20 @@ enum skl_disp_power_wells {
8093 8106
8094#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) 8107#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
8095#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) 8108#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
8096#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) 8109#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
8098#define DATA_MEM_ADDRESS_SHIFT 5 8110#define DATA_MEM_ADDRESS_SHIFT 5
8099#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) 8111#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
8100#define DATA_VALID (1 << 0) 8112#define DATA_VALID (1 << 0)
8101 8113
8102#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) 8114#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
8103#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) 8115#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
8104#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) 8116#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
8106#define DATA_LENGTH_SHIFT 0 8117#define DATA_LENGTH_SHIFT 0
8107#define DATA_LENGTH_MASK (0xfffff << 0) 8118#define DATA_LENGTH_MASK (0xfffff << 0)
8108 8119
8109#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) 8120#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
8110#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) 8121#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
8111#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) 8122#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
8113#define COMMAND_MEM_ADDRESS_SHIFT 5 8123#define COMMAND_MEM_ADDRESS_SHIFT 5
8114#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) 8124#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
8115#define AUTO_PWG_ENABLE (1 << 2) 8125#define AUTO_PWG_ENABLE (1 << 2)
@@ -8118,21 +8128,17 @@ enum skl_disp_power_wells {
8118 8128
8119#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) 8129#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
8120#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) 8130#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
8121#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) 8131#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
8123#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ 8132#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
8124#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) 8133#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
8125 8134
8126#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) 8135#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
8127#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) 8136#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
8128#define MIPI_READ_DATA_RETURN(port, n) (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ 8137#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
8131 8138
8132#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) 8139#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
8133#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) 8140#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
8134#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) 8141#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
8136#define READ_DATA_VALID(n) (1 << (n)) 8142#define READ_DATA_VALID(n) (1 << (n))
8137 8143
8138/* For UMS only (deprecated): */ 8144/* For UMS only (deprecated): */
@@ -8140,12 +8146,12 @@ enum skl_disp_power_wells {
8140#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) 8146#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
8141 8147
8142/* MOCS (Memory Object Control State) registers */ 8148/* MOCS (Memory Object Control State) registers */
8143#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */ 8149#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
8144 8150
8145#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register*/ 8151#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */
8146#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register*/ 8152#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */
8147#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register*/ 8153#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
8148#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register*/ 8154#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
8149#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register*/ 8155#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
8150 8156
8151#endif /* _I915_REG_H_ */ 8157#endif /* _I915_REG_H_ */
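[Editor's note] Taken together, the i915_reg.h hunks above are a mechanical conversion: every register offset that used to be a bare integer is wrapped in _MMIO() so it carries the i915_reg_t type. The wrapper itself is defined outside the hunks shown here; it is presumably a single-member struct along these lines, which costs nothing at runtime but makes I915_READ/I915_WRITE reject raw offsets at compile time:

	/* plausible shape of the wrapper this series converts to (sketch) */
	typedef struct {
		uint32_t reg;
	} i915_reg_t;

	#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

	static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
	{
		return reg.reg;
	}

With this in place the new indexed MOCS macros, for example, are used as I915_WRITE(GEN9_GFX_MOCS(i), value); passing 0xc800 + i * 4 directly would no longer compile.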
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 50ce9ce2b269..f929c61f0fa2 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -35,7 +35,8 @@
35#define dev_to_drm_minor(d) dev_get_drvdata((d)) 35#define dev_to_drm_minor(d) dev_get_drvdata((d))
36 36
37#ifdef CONFIG_PM 37#ifdef CONFIG_PM
38static u32 calc_residency(struct drm_device *dev, const u32 reg) 38static u32 calc_residency(struct drm_device *dev, i915_reg_t reg)
39{ 40{
40 struct drm_i915_private *dev_priv = dev->dev_private; 41 struct drm_i915_private *dev_priv = dev->dev_private;
41 u64 raw_time; /* 32b value may overflow during fixed point math */ 42 u64 raw_time; /* 32b value may overflow during fixed point math */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 04fe8491c8b6..52b2d409945d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -664,7 +664,7 @@ TRACE_EVENT(i915_flip_complete,
664); 664);
665 665
666TRACE_EVENT_CONDITION(i915_reg_rw, 666TRACE_EVENT_CONDITION(i915_reg_rw,
667 TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), 667 TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
668 668
669 TP_ARGS(write, reg, val, len, trace), 669 TP_ARGS(write, reg, val, len, trace),
670 670
@@ -679,7 +679,7 @@ TRACE_EVENT_CONDITION(i915_reg_rw,
679 679
680 TP_fast_assign( 680 TP_fast_assign(
681 __entry->val = (u64)val; 681 __entry->val = (u64)val;
682 __entry->reg = reg; 682 __entry->reg = i915_mmio_reg_offset(reg);
683 __entry->write = write; 683 __entry->write = write;
684 __entry->len = len; 684 __entry->len = len;
685 ), 685 ),
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5eee75bff170..dea7429be4d0 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev)
69 if (!IS_HASWELL(dev)) 69 if (!IS_HASWELL(dev))
70 return; 70 return;
71 71
72 magic = readq(dev_priv->regs + vgtif_reg(magic)); 72 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
73 if (magic != VGT_MAGIC) 73 if (magic != VGT_MAGIC)
74 return; 74 return;
75 75
76 version = INTEL_VGT_IF_VERSION_ENCODE( 76 version = INTEL_VGT_IF_VERSION_ENCODE(
77 readw(dev_priv->regs + vgtif_reg(version_major)), 77 __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
78 readw(dev_priv->regs + vgtif_reg(version_minor))); 78 __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
79 if (version != INTEL_VGT_IF_VERSION) { 79 if (version != INTEL_VGT_IF_VERSION) {
80 DRM_INFO("VGT interface version mismatch!\n"); 80 DRM_INFO("VGT interface version mismatch!\n");
81 return; 81 return;
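[Editor's note] Switching from readq()/readw() on dev_priv->regs to the __raw_i915_read helpers keeps the vGPU magic check working with typed registers. The raw accessors are generated elsewhere (intel_uncore.c) and are not shown in this diff; a sketch of their assumed shape:

	/* sketch: bypasses forcewake and tracing, just a plain MMIO read */
	#define __raw_i915_read16(dev_priv__, reg__) \
		readw((dev_priv__)->regs + i915_mmio_reg_offset(reg__))
	#define __raw_i915_read64(dev_priv__, reg__) \
		readq((dev_priv__)->regs + i915_mmio_reg_offset(reg__))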
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 21c97f44d637..3c83b47b5f69 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -92,14 +92,10 @@ struct vgt_if {
92 uint32_t g2v_notify; 92 uint32_t g2v_notify;
93 uint32_t rsv6[7]; 93 uint32_t rsv6[7];
94 94
95 uint32_t pdp0_lo; 95 struct {
96 uint32_t pdp0_hi; 96 uint32_t lo;
97 uint32_t pdp1_lo; 97 uint32_t hi;
98 uint32_t pdp1_hi; 98 } pdp[4];
99 uint32_t pdp2_lo;
100 uint32_t pdp2_hi;
101 uint32_t pdp3_lo;
102 uint32_t pdp3_hi;
103 99
104 uint32_t execlist_context_descriptor_lo; 100 uint32_t execlist_context_descriptor_lo;
105 uint32_t execlist_context_descriptor_hi; 101 uint32_t execlist_context_descriptor_hi;
@@ -108,7 +104,7 @@ struct vgt_if {
108} __packed; 104} __packed;
109 105
110#define vgtif_reg(x) \ 106#define vgtif_reg(x) \
111 (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x) 107 _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
112 108
113/* vGPU display status to be used by the host side */ 109/* vGPU display status to be used by the host side */
114#define VGT_DRV_DISPLAY_NOT_READY 0 110#define VGT_DRV_DISPLAY_NOT_READY 0
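[Editor's note] The updated vgtif_reg() still uses the classic null-pointer field-offset idiom; it is equivalent to the following offsetof() spelling (shown here for clarity, not part of the patch), and with the new pdp array the PDP registers are addressed by index:

	/* equivalent spelling, for illustration */
	#define vgtif_reg(x) \
		_MMIO(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))

	/* e.g. vgtif_reg(pdp[2].lo) replaces the old vgtif_reg(pdp2_lo) */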
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index f1975f267710..643f342de33b 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
94 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); 94 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
95 95
96 crtc_state->update_pipe = false; 96 crtc_state->update_pipe = false;
97 crtc_state->disable_lp_wm = false;
97 98
98 return &crtc_state->base; 99 return &crtc_state->base;
99} 100}
@@ -205,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
205 * but since this plane is unchanged just do the 206 * but since this plane is unchanged just do the
206 * minimum required validation. 207 * minimum required validation.
207 */ 208 */
208 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
209 intel_crtc->atomic.wait_for_flips = true;
210 crtc_state->base.planes_changed = true; 209 crtc_state->base.planes_changed = true;
211 } 210 }
212 211
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index a11980696595..c6bb0fc1edfb 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
84 state = &intel_state->base; 84 state = &intel_state->base;
85 85
86 __drm_atomic_helper_plane_duplicate_state(plane, state); 86 __drm_atomic_helper_plane_duplicate_state(plane, state);
87 intel_state->wait_req = NULL;
87 88
88 return state; 89 return state;
89} 90}
@@ -100,6 +101,7 @@ void
100intel_plane_destroy_state(struct drm_plane *plane, 101intel_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 102 struct drm_plane_state *state)
102{ 103{
104 WARN_ON(state && to_intel_plane_state(state)->wait_req);
103 drm_atomic_helper_plane_destroy_state(plane, state); 105 drm_atomic_helper_plane_destroy_state(plane, state);
104} 106}
105 107
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 4dccd9b003a1..9aa83e71b792 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
161} 161}
162 162
163static bool intel_eld_uptodate(struct drm_connector *connector, 163static bool intel_eld_uptodate(struct drm_connector *connector,
164 int reg_eldv, uint32_t bits_eldv, 164 i915_reg_t reg_eldv, uint32_t bits_eldv,
165 int reg_elda, uint32_t bits_elda, 165 i915_reg_t reg_elda, uint32_t bits_elda,
166 int reg_edid) 166 i915_reg_t reg_edid)
167{ 167{
168 struct drm_i915_private *dev_priv = connector->dev->dev_private; 168 struct drm_i915_private *dev_priv = connector->dev->dev_private;
169 uint8_t *eld = connector->eld; 169 uint8_t *eld = connector->eld;
@@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
364 enum port port = intel_dig_port->port; 364 enum port port = intel_dig_port->port;
365 enum pipe pipe = intel_crtc->pipe; 365 enum pipe pipe = intel_crtc->pipe;
366 uint32_t tmp, eldv; 366 uint32_t tmp, eldv;
367 int aud_config; 367 i915_reg_t aud_config, aud_cntrl_st2;
368 int aud_cntrl_st2;
369 368
370 DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", 369 DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
371 port_name(port), pipe_name(pipe)); 370 port_name(port), pipe_name(pipe));
@@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
416 uint32_t eldv; 415 uint32_t eldv;
417 uint32_t tmp; 416 uint32_t tmp;
418 int len, i; 417 int len, i;
419 int hdmiw_hdmiedid; 418 i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
420 int aud_config;
421 int aud_cntl_st;
422 int aud_cntrl_st2;
423 419
424 DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", 420 DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
425 port_name(port), pipe_name(pipe), drm_eld_size(eld)); 421 port_name(port), pipe_name(pipe), drm_eld_size(eld));
@@ -591,7 +587,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
591 struct drm_i915_private *dev_priv = dev_to_i915(dev); 587 struct drm_i915_private *dev_priv = dev_to_i915(dev);
592 u32 tmp; 588 u32 tmp;
593 589
594 if (!IS_SKYLAKE(dev_priv)) 590 if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
595 return; 591 return;
596 592
597 /* 593 /*
@@ -642,10 +638,11 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
642 u32 tmp; 638 u32 tmp;
643 int n; 639 int n;
644 640
645 /* HSW, BDW SKL need this fix */ 641 /* HSW, BDW, SKL, KBL need this fix */
646 if (!IS_SKYLAKE(dev_priv) && 642 if (!IS_SKYLAKE(dev_priv) &&
647 !IS_BROADWELL(dev_priv) && 643 !IS_KABYLAKE(dev_priv) &&
648 !IS_HASWELL(dev_priv)) 644 !IS_BROADWELL(dev_priv) &&
645 !IS_HASWELL(dev_priv))
649 return 0; 646 return 0;
650 647
651 mutex_lock(&dev_priv->av_mutex); 648 mutex_lock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c7df24..070470fe9a91 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
356 general = find_section(bdb, BDB_GENERAL_FEATURES); 356 general = find_section(bdb, BDB_GENERAL_FEATURES);
357 if (general) { 357 if (general) {
358 dev_priv->vbt.int_tv_support = general->int_tv_support; 358 dev_priv->vbt.int_tv_support = general->int_tv_support;
359 dev_priv->vbt.int_crt_support = general->int_crt_support; 359 /* int_crt_support can't be trusted on earlier platforms */
360 if (bdb->version >= 155 &&
361 (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
362 dev_priv->vbt.int_crt_support = general->int_crt_support;
360 dev_priv->vbt.lvds_use_ssc = general->enable_ssc; 363 dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
361 dev_priv->vbt.lvds_ssc_freq = 364 dev_priv->vbt.lvds_ssc_freq =
362 intel_bios_ssc_frequency(dev, general->ssc_freq); 365 intel_bios_ssc_frequency(dev, general->ssc_freq);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b84aaa0bb48a..9285fc1e64ee 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -50,7 +50,7 @@ struct intel_crt {
50 * encoder's enable/disable callbacks */ 50 * encoder's enable/disable callbacks */
51 struct intel_connector *connector; 51 struct intel_connector *connector;
52 bool force_hotplug_required; 52 bool force_hotplug_required;
53 u32 adpa_reg; 53 i915_reg_t adpa_reg;
54}; 54};
55 55
56static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) 56static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
@@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
138 pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); 138 pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
139} 139}
140 140
141static void hsw_crt_pre_enable(struct intel_encoder *encoder)
142{
143 struct drm_device *dev = encoder->base.dev;
144 struct drm_i915_private *dev_priv = dev->dev_private;
145
146 WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
147 I915_WRITE(SPLL_CTL,
148 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
149 POSTING_READ(SPLL_CTL);
150 udelay(20);
151}
152
153/* Note: The caller is required to filter out dpms modes not supported by the 141/* Note: The caller is required to filter out dpms modes not supported by the
154 * platform. */ 142 * platform. */
155static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 143static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
216 intel_disable_crt(encoder); 204 intel_disable_crt(encoder);
217} 205}
218 206
219static void hsw_crt_post_disable(struct intel_encoder *encoder)
220{
221 struct drm_device *dev = encoder->base.dev;
222 struct drm_i915_private *dev_priv = dev->dev_private;
223 uint32_t val;
224
225 DRM_DEBUG_KMS("Disabling SPLL\n");
226 val = I915_READ(SPLL_CTL);
227 WARN_ON(!(val & SPLL_PLL_ENABLE));
228 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
229 POSTING_READ(SPLL_CTL);
230}
231
232static void intel_enable_crt(struct intel_encoder *encoder) 207static void intel_enable_crt(struct intel_encoder *encoder)
233{ 208{
234 struct intel_crt *crt = intel_encoder_to_crt(encoder); 209 struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
280 if (HAS_DDI(dev)) { 255 if (HAS_DDI(dev)) {
281 pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; 256 pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
282 pipe_config->port_clock = 135000 * 2; 257 pipe_config->port_clock = 135000 * 2;
258
259 pipe_config->dpll_hw_state.wrpll = 0;
260 pipe_config->dpll_hw_state.spll =
261 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
283 } 262 }
284 263
285 return true; 264 return true;
@@ -501,12 +480,8 @@ intel_crt_load_detect(struct intel_crt *crt)
501 uint32_t vsample; 480 uint32_t vsample;
502 uint32_t vblank, vblank_start, vblank_end; 481 uint32_t vblank, vblank_start, vblank_end;
503 uint32_t dsl; 482 uint32_t dsl;
504 uint32_t bclrpat_reg; 483 i915_reg_t bclrpat_reg, vtotal_reg,
505 uint32_t vtotal_reg; 484 vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
506 uint32_t vblank_reg;
507 uint32_t vsync_reg;
508 uint32_t pipeconf_reg;
509 uint32_t pipe_dsl_reg;
510 uint8_t st00; 485 uint8_t st00;
511 enum drm_connector_status status; 486 enum drm_connector_status status;
512 487
@@ -539,7 +514,7 @@ intel_crt_load_detect(struct intel_crt *crt)
539 /* Wait for next Vblank to substitute 514 /* Wait for next Vblank to substitute
540 * border color for Color info */ 515 * border color for Color info */
541 intel_wait_for_vblank(dev, pipe); 516 intel_wait_for_vblank(dev, pipe);
542 st00 = I915_READ8(VGA_MSR_WRITE); 517 st00 = I915_READ8(_VGA_MSR_WRITE);
543 status = ((st00 & (1 << 4)) != 0) ? 518 status = ((st00 & (1 << 4)) != 0) ?
544 connector_status_connected : 519 connector_status_connected :
545 connector_status_disconnected; 520 connector_status_disconnected;
@@ -584,7 +559,7 @@ intel_crt_load_detect(struct intel_crt *crt)
584 do { 559 do {
585 count++; 560 count++;
586 /* Read the ST00 VGA status register */ 561 /* Read the ST00 VGA status register */
587 st00 = I915_READ8(VGA_MSR_WRITE); 562 st00 = I915_READ8(_VGA_MSR_WRITE);
588 if (st00 & (1 << 4)) 563 if (st00 & (1 << 4))
589 detect++; 564 detect++;
590 } while ((I915_READ(pipe_dsl_reg) == dsl)); 565 } while ((I915_READ(pipe_dsl_reg) == dsl));
@@ -802,11 +777,37 @@ void intel_crt_init(struct drm_device *dev)
802 struct intel_crt *crt; 777 struct intel_crt *crt;
803 struct intel_connector *intel_connector; 778 struct intel_connector *intel_connector;
804 struct drm_i915_private *dev_priv = dev->dev_private; 779 struct drm_i915_private *dev_priv = dev->dev_private;
780 i915_reg_t adpa_reg;
781 u32 adpa;
805 782
806 /* Skip machines without VGA that falsely report hotplug events */ 783 /* Skip machines without VGA that falsely report hotplug events */
807 if (dmi_check_system(intel_no_crt)) 784 if (dmi_check_system(intel_no_crt))
808 return; 785 return;
809 786
787 if (HAS_PCH_SPLIT(dev))
788 adpa_reg = PCH_ADPA;
789 else if (IS_VALLEYVIEW(dev))
790 adpa_reg = VLV_ADPA;
791 else
792 adpa_reg = ADPA;
793
794 adpa = I915_READ(adpa_reg);
795 if ((adpa & ADPA_DAC_ENABLE) == 0) {
796 /*
797 * On some machines (some IVB at least) CRT can be
798 * fused off, but there's no known fuse bit to
798 * indicate that. On these machines the ADPA register
800 * works normally, except the DAC enable bit won't
801 * take. So the only way to tell is attempt to enable
802 * it and see what happens.
803 */
804 I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
805 ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
806 if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
807 return;
808 I915_WRITE(adpa_reg, adpa);
809 }
810
810 crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); 811 crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
811 if (!crt) 812 if (!crt)
812 return; 813 return;
@@ -823,7 +824,7 @@ void intel_crt_init(struct drm_device *dev)
823 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 824 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
824 825
825 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, 826 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
826 DRM_MODE_ENCODER_DAC); 827 DRM_MODE_ENCODER_DAC, NULL);
827 828
828 intel_connector_attach_encoder(intel_connector, &crt->base); 829 intel_connector_attach_encoder(intel_connector, &crt->base);
829 830
@@ -840,12 +841,7 @@ void intel_crt_init(struct drm_device *dev)
840 connector->interlace_allowed = 1; 841 connector->interlace_allowed = 1;
841 connector->doublescan_allowed = 0; 842 connector->doublescan_allowed = 0;
842 843
843 if (HAS_PCH_SPLIT(dev)) 844 crt->adpa_reg = adpa_reg;
844 crt->adpa_reg = PCH_ADPA;
845 else if (IS_VALLEYVIEW(dev))
846 crt->adpa_reg = VLV_ADPA;
847 else
848 crt->adpa_reg = ADPA;
849 845
850 crt->base.compute_config = intel_crt_compute_config; 846 crt->base.compute_config = intel_crt_compute_config;
851 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) { 847 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
@@ -860,8 +856,6 @@ void intel_crt_init(struct drm_device *dev)
860 if (HAS_DDI(dev)) { 856 if (HAS_DDI(dev)) {
861 crt->base.get_config = hsw_crt_get_config; 857 crt->base.get_config = hsw_crt_get_config;
862 crt->base.get_hw_state = intel_ddi_get_hw_state; 858 crt->base.get_hw_state = intel_ddi_get_hw_state;
863 crt->base.pre_enable = hsw_crt_pre_enable;
864 crt->base.post_disable = hsw_crt_post_disable;
865 } else { 859 } else {
866 crt->base.get_config = intel_crt_get_config; 860 crt->base.get_config = intel_crt_get_config;
867 crt->base.get_hw_state = intel_crt_get_hw_state; 861 crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9e530a739354..6c6a6695e99c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -47,21 +47,10 @@
 MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 
-/*
-* SKL CSR registers for DC5 and DC6
-*/
-#define CSR_PROGRAM(i)		(0x80000 + (i) * 4)
-#define CSR_SSP_BASE_ADDR_GEN9	0x00002FC0
-#define CSR_HTP_ADDR_SKL	0x00500034
-#define CSR_SSP_BASE		0x8F074
-#define CSR_HTP_SKL		0x8F004
-#define CSR_LAST_WRITE		0x8F034
-#define CSR_LAST_WRITE_VALUE	0xc003b400
-/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
-#define CSR_MMIO_START_RANGE	0x80000
-#define CSR_MMIO_END_RANGE	0x8FFFF
 
 struct intel_css_header {
 	/* 0x09 for DMC */
@@ -178,166 +167,134 @@ struct stepping_info {
 };
 
 static const struct stepping_info skl_stepping_info[] = {
 	{'A', '0'}, {'B', '0'}, {'C', '0'},
 	{'D', '0'}, {'E', '0'}, {'F', '0'},
 	{'G', '0'}, {'H', '0'}, {'I', '0'}
 };
 
-static struct stepping_info bxt_stepping_info[] = {
+static const struct stepping_info bxt_stepping_info[] = {
 	{'A', '0'}, {'A', '1'}, {'A', '2'},
 	{'B', '0'}, {'B', '1'}, {'B', '2'}
 };
 
-static char intel_get_stepping(struct drm_device *dev)
-{
-	if (IS_SKYLAKE(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(skl_stepping_info)))
-		return skl_stepping_info[dev->pdev->revision].stepping;
-	else if (IS_BROXTON(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(bxt_stepping_info)))
-		return bxt_stepping_info[dev->pdev->revision].stepping;
-	else
-		return -ENODATA;
-}
-
-static char intel_get_substepping(struct drm_device *dev)
+static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
 {
-	if (IS_SKYLAKE(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(skl_stepping_info)))
-		return skl_stepping_info[dev->pdev->revision].substepping;
-	else if (IS_BROXTON(dev) && (dev->pdev->revision <
-			ARRAY_SIZE(bxt_stepping_info)))
-		return bxt_stepping_info[dev->pdev->revision].substepping;
-	else
-		return -ENODATA;
-}
-
-/**
- * intel_csr_load_status_get() - to get firmware loading status.
- * @dev_priv: i915 device.
- *
- * This function helps to get the firmware loading status.
- *
- * Return: Firmware loading status.
- */
-enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
-{
-	enum csr_state state;
+	const struct stepping_info *si;
+	unsigned int size;
+
+	if (IS_SKYLAKE(dev)) {
+		size = ARRAY_SIZE(skl_stepping_info);
+		si = skl_stepping_info;
+	} else if (IS_BROXTON(dev)) {
+		size = ARRAY_SIZE(bxt_stepping_info);
+		si = bxt_stepping_info;
+	} else {
+		return NULL;
+	}
 
-	mutex_lock(&dev_priv->csr_lock);
-	state = dev_priv->csr.state;
-	mutex_unlock(&dev_priv->csr_lock);
+	if (INTEL_REVID(dev) < size)
+		return si + INTEL_REVID(dev);
 
-	return state;
-}
-
-/**
- * intel_csr_load_status_set() - help to set firmware loading status.
- * @dev_priv: i915 device.
- * @state: enumeration of firmware loading status.
- *
- * Set the firmware loading status.
- */
-void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
-			enum csr_state state)
-{
-	mutex_lock(&dev_priv->csr_lock);
-	dev_priv->csr.state = state;
-	mutex_unlock(&dev_priv->csr_lock);
+	return NULL;
 }
 
 /**
  * intel_csr_load_program() - write the firmware from memory to register.
- * @dev: drm device.
+ * @dev_priv: i915 drm device.
  *
  * CSR firmware is read from a .bin file and kept in internal memory one time.
  * Every time the display comes back from a low power state this function is
  * called to copy the firmware from internal memory to registers.
  */
-void intel_csr_load_program(struct drm_device *dev)
+void intel_csr_load_program(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;
 
-	if (!IS_GEN9(dev)) {
+	if (!IS_GEN9(dev_priv)) {
 		DRM_ERROR("No CSR support available for this platform\n");
 		return;
 	}
 
-	/*
-	 * FIXME: Firmware gets lost on S3/S4, but not when entering system
-	 * standby or suspend-to-idle (which is just like forced runtime pm).
-	 * Unfortunately the ACPI subsystem doesn't yet give us a way to
-	 * differentiate this, hence figure it out with this hack.
-	 */
-	if (I915_READ(CSR_PROGRAM(0)))
+	if (!dev_priv->csr.dmc_payload) {
+		DRM_ERROR("Tried to program CSR with empty payload\n");
 		return;
+	}
 
-	mutex_lock(&dev_priv->csr_lock);
 	fw_size = dev_priv->csr.dmc_fw_size;
 	for (i = 0; i < fw_size; i++)
 		I915_WRITE(CSR_PROGRAM(i), payload[i]);
 
 	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
 		I915_WRITE(dev_priv->csr.mmioaddr[i],
 			   dev_priv->csr.mmiodata[i]);
 	}
-
-	dev_priv->csr.state = FW_LOADED;
-	mutex_unlock(&dev_priv->csr_lock);
 }
 
-static void finish_csr_load(const struct firmware *fw, void *context)
+static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
+			      const struct firmware *fw)
 {
-	struct drm_i915_private *dev_priv = context;
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_css_header *css_header;
 	struct intel_package_header *package_header;
 	struct intel_dmc_header *dmc_header;
 	struct intel_csr *csr = &dev_priv->csr;
-	char stepping = intel_get_stepping(dev);
-	char substepping = intel_get_substepping(dev);
+	const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
+	char stepping, substepping;
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
 	uint32_t *dmc_payload;
-	bool fw_loaded = false;
 
-	if (!fw) {
-		i915_firmware_load_error_print(csr->fw_path, 0);
-		goto out;
-	}
+	if (!fw)
		return NULL;
 
-	if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
+	if (!stepping_info) {
 		DRM_ERROR("Unknown stepping info, firmware loading failed\n");
-		goto out;
+		return NULL;
 	}
 
+	stepping = stepping_info->stepping;
+	substepping = stepping_info->substepping;
+
 	/* Extract CSS Header information */
 	css_header = (struct intel_css_header *)fw->data;
 	if (sizeof(struct intel_css_header) !=
 		(css_header->header_len * 4)) {
 		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
 			(css_header->header_len * 4));
-		goto out;
+		return NULL;
 	}
+
+	csr->version = css_header->version;
+
+	if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+			 " please upgrade to v%u.%u or later"
+			 " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+			 CSR_VERSION_MAJOR(csr->version),
+			 CSR_VERSION_MINOR(csr->version),
+			 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
+			 CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+		return NULL;
+	}
+
 	readcount += sizeof(struct intel_css_header);
 
 	/* Extract Package Header information */
 	package_header = (struct intel_package_header *)
 		&fw->data[readcount];
 	if (sizeof(struct intel_package_header) !=
 		(package_header->header_len * 4)) {
 		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
 			(package_header->header_len * 4));
-		goto out;
+		return NULL;
 	}
 	readcount += sizeof(struct intel_package_header);
 
 	/* Search for dmc_offset to find the firmware binary. */
 	for (i = 0; i < package_header->num_entries; i++) {
 		if (package_header->fw_info[i].substepping == '*' &&
 			stepping == package_header->fw_info[i].stepping) {
 			dmc_offset = package_header->fw_info[i].offset;
 			break;
 		} else if (stepping == package_header->fw_info[i].stepping &&
@@ -345,12 +302,12 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 			dmc_offset = package_header->fw_info[i].offset;
 			break;
 		} else if (package_header->fw_info[i].stepping == '*' &&
 			package_header->fw_info[i].substepping == '*')
 			dmc_offset = package_header->fw_info[i].offset;
 	}
 	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
 		DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
-		goto out;
+		return NULL;
 	}
 	readcount += dmc_offset;
 
@@ -358,26 +315,26 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
 	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
 		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
 			(dmc_header->header_len));
-		goto out;
+		return NULL;
 	}
 	readcount += sizeof(struct intel_dmc_header);
 
 	/* Cache the dmc header info. */
 	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
 		DRM_ERROR("Firmware has wrong mmio count %u\n",
 			dmc_header->mmio_count);
-		goto out;
+		return NULL;
 	}
 	csr->mmio_count = dmc_header->mmio_count;
 	for (i = 0; i < dmc_header->mmio_count; i++) {
 		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
 		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
 			DRM_ERROR("Firmware has wrong mmio address 0x%x\n",
 				  dmc_header->mmioaddr[i]);
-			goto out;
+			return NULL;
 		}
-		csr->mmioaddr[i] = dmc_header->mmioaddr[i];
+		csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
 		csr->mmiodata[i] = dmc_header->mmiodata[i];
 	}
 
@@ -385,56 +342,80 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	nbytes = dmc_header->fw_size * 4;
 	if (nbytes > CSR_MAX_FW_SIZE) {
 		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
-		goto out;
+		return NULL;
 	}
 	csr->dmc_fw_size = dmc_header->fw_size;
 
-	csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
-	if (!csr->dmc_payload) {
+	dmc_payload = kmalloc(nbytes, GFP_KERNEL);
+	if (!dmc_payload) {
 		DRM_ERROR("Memory allocation failed for dmc payload\n");
-		goto out;
+		return NULL;
 	}
 
-	dmc_payload = csr->dmc_payload;
 	memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
+	return dmc_payload;
+}
+
+static void csr_load_work_fn(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv;
+	struct intel_csr *csr;
+	const struct firmware *fw;
+	int ret;
+
+	dev_priv = container_of(work, typeof(*dev_priv), csr.work);
+	csr = &dev_priv->csr;
+
+	ret = request_firmware(&fw, dev_priv->csr.fw_path,
+			       &dev_priv->dev->pdev->dev);
+	if (!fw)
+		goto out;
+
+	dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
+	if (!dev_priv->csr.dmc_payload)
+		goto out;
+
 	/* load csr program during system boot, as needed for DC states */
-	intel_csr_load_program(dev);
-	fw_loaded = true;
+	intel_csr_load_program(dev_priv);
 
-	DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
 out:
-	if (fw_loaded)
-		intel_runtime_pm_put(dev_priv);
-	else
-		intel_csr_load_status_set(dev_priv, FW_FAILED);
+	if (dev_priv->csr.dmc_payload) {
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+		DRM_INFO("Finished loading %s (v%u.%u)\n",
+			 dev_priv->csr.fw_path,
+			 CSR_VERSION_MAJOR(csr->version),
+			 CSR_VERSION_MINOR(csr->version));
+	} else {
+		DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+	}
 
 	release_firmware(fw);
 }
 
 /**
  * intel_csr_ucode_init() - initialize the firmware loading.
- * @dev: drm device.
+ * @dev_priv: i915 drm device.
  *
  * This function is called at the time of loading the display driver to read
  * firmware from a .bin file and copy it into internal memory.
  */
-void intel_csr_ucode_init(struct drm_device *dev)
+void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_csr *csr = &dev_priv->csr;
-	int ret;
 
-	if (!HAS_CSR(dev))
+	INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
+
+	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv))
 		csr->fw_path = I915_CSR_SKL;
 	else if (IS_BROXTON(dev_priv))
 		csr->fw_path = I915_CSR_BXT;
 	else {
 		DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
-		intel_csr_load_status_set(dev_priv, FW_FAILED);
 		return;
 	}
 
@@ -444,43 +425,24 @@ void intel_csr_ucode_init(struct drm_device *dev)
 	 * Obtain a runtime pm reference, until CSR is loaded,
 	 * to avoid entering runtime-suspend.
 	 */
-	intel_runtime_pm_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
-	/* CSR supported for platform, load firmware */
-	ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
-				      &dev_priv->dev->pdev->dev,
-				      GFP_KERNEL, dev_priv,
-				      finish_csr_load);
-	if (ret) {
-		i915_firmware_load_error_print(csr->fw_path, ret);
-		intel_csr_load_status_set(dev_priv, FW_FAILED);
-	}
+	schedule_work(&dev_priv->csr.work);
 }
 
 /**
  * intel_csr_ucode_fini() - unload the CSR firmware.
- * @dev: drm device.
+ * @dev_priv: i915 drm device.
  *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
  */
-void intel_csr_ucode_fini(struct drm_device *dev)
+void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_CSR(dev))
+	if (!HAS_CSR(dev_priv))
 		return;
 
-	intel_csr_load_status_set(dev_priv, FW_FAILED);
-	kfree(dev_priv->csr.dmc_payload);
-}
-
-void assert_csr_loaded(struct drm_i915_private *dev_priv)
-{
-	WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
-		  "CSR is not loaded.\n");
-	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
-		  "CSR program storage start is NULL\n");
-	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
-	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+	flush_work(&dev_priv->csr.work);
+
+	kfree(dev_priv->csr.dmc_payload);
 }
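
For reference, csr_load_work_fn() above is the standard workqueue idiom: embed a work_struct in the private structure, recover the container with container_of(), and do the slow firmware request off the probe path. A hedged, self-contained sketch of the same shape (my_priv and its field names are illustrative, not i915's):

	#include <linux/workqueue.h>
	#include <linux/firmware.h>

	struct my_priv {
		struct work_struct fw_work;
		const char *fw_path;
		struct device *dev;
	};

	static void fw_load_work_fn(struct work_struct *work)
	{
		/* Recover the containing object from the embedded work item. */
		struct my_priv *priv = container_of(work, struct my_priv, fw_work);
		const struct firmware *fw = NULL;

		if (request_firmware(&fw, priv->fw_path, priv->dev))
			return;

		/* parse and program fw->data / fw->size here ... */

		release_firmware(fw);
	}

	static void fw_init(struct my_priv *priv)
	{
		INIT_WORK(&priv->fw_work, fw_load_work_fn);
		schedule_work(&priv->fw_work);	/* pair with flush_work() on teardown */
	}
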
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b25e99a432fb..4afb3103eb96 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 	{ 0x00002016, 0x000000A0, 0x0 },
 	{ 0x00005012, 0x0000009B, 0x0 },
 	{ 0x00007011, 0x00000088, 0x0 },
-	{ 0x00009010, 0x000000C7, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x00002016, 0x0000009B, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x00002016, 0x000000DF, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 };
 
 /* Skylake U */
@@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
 	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C7, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x0000201B, 0x0000009D, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
-	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 	{ 0x00002016, 0x00000088, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
 };
 
 /* Skylake Y */
@@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
 	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C7, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80009010, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
 	{ 0x00000018, 0x0000009D, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
-	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80007011, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
 	{ 0x00000018, 0x00000088, 0x0 },
-	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
 };
 
 /*
@@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 static bool
 intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
 {
-	return intel_dig_port->hdmi.hdmi_reg;
+	return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
 }
 
 static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 		bxt_ddi_vswing_sequence(dev, hdmi_level, port,
 					INTEL_OUTPUT_HDMI);
 		return;
-	} else if (IS_SKYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp =
 				skl_get_buf_trans_dp(dev, &n_dp_entries);
@@ -576,7 +576,7 @@ void intel_prepare_ddi(struct drm_device *dev)
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 				    enum port port)
 {
-	uint32_t reg = DDI_BUF_CTL(port);
+	i915_reg_t reg = DDI_BUF_CTL(port);
 	int i;
 
 	for (i = 0; i < 16; i++) {
@@ -931,7 +931,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
 	/* Otherwise a < c && b >= d, do nothing */
 }
 
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+				   i915_reg_t reg)
 {
 	int refclk = LC_FREQ;
 	int n, p, r;
@@ -967,7 +968,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
 static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 			       uint32_t dpll)
 {
-	uint32_t cfgcr1_reg, cfgcr2_reg;
+	i915_reg_t cfgcr1_reg, cfgcr2_reg;
 	uint32_t cfgcr1_val, cfgcr2_val;
 	uint32_t p0, p1, p2, dco_freq;
 
@@ -1112,10 +1113,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 		link_clock = 270000;
 		break;
 	case PORT_CLK_SEL_WRPLL1:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
 		break;
 	case PORT_CLK_SEL_WRPLL2:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -1184,7 +1185,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
 
 	if (INTEL_INFO(dev)->gen <= 8)
 		hsw_ddi_clock_get(encoder, pipe_config);
-	else if (IS_SKYLAKE(dev))
+	else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		skl_ddi_clock_get(encoder, pipe_config);
 	else if (IS_BROXTON(dev))
 		bxt_ddi_clock_get(encoder, pipe_config);
@@ -1286,6 +1287,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		}
 
 		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+		struct drm_atomic_state *state = crtc_state->base.state;
+		struct intel_shared_dpll_config *spll =
+			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+		if (spll->crtc_mask &&
+		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+			return false;
+
+		crtc_state->shared_dpll = DPLL_ID_SPLL;
+		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+		spll->crtc_mask |= 1 << intel_crtc->pipe;
 	}
 
 	return true;
@@ -1768,7 +1781,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 	struct intel_encoder *intel_encoder =
 		intel_ddi_get_crtc_new_encoder(crtc_state);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		return skl_ddi_pll_select(intel_crtc, crtc_state,
 					  intel_encoder);
 	else if (IS_BROXTON(dev))
@@ -1930,7 +1943,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 				       enum transcoder cpu_transcoder)
 {
-	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);
 
 	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -2085,21 +2098,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
 			iboost = dp_iboost;
 		} else {
 			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
-			iboost = ddi_translations[port].i_boost;
+			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_EDP) {
 		if (dp_iboost) {
 			iboost = dp_iboost;
 		} else {
 			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
-			iboost = ddi_translations[port].i_boost;
+			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		if (hdmi_iboost) {
 			iboost = hdmi_iboost;
 		} else {
 			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
-			iboost = ddi_translations[port].i_boost;
+			iboost = ddi_translations[level].i_boost;
 		}
 	} else {
 		return;
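
The three one-line changes in the hunk above are the same bug fix repeated: the buffer translation tables are indexed by vswing/pre-emphasis level, so reading tbl[port] picked an unrelated entry. A small sketch of the corrected lookup (the types and the clamp are illustrative, not the driver's code):

	#include <linux/types.h>

	struct buf_trans { u32 trans1; u32 trans2; u8 i_boost; };

	static u8 lookup_iboost(const struct buf_trans *tbl, int n_entries, int level)
	{
		if (level < 0 || level >= n_entries)
			level = 0;		/* defensive clamp, assumed behavior */
		return tbl[level].i_boost;	/* previously indexed by port */
	}
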
@@ -2251,7 +2264,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 
 	level = translate_signal_level(signal_levels);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		skl_ddi_set_iboost(dev, level, port, encoder->type);
 	else if (IS_BROXTON(dev))
 		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
@@ -2259,30 +2272,21 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 	return DDI_BUF_TRANS_SELECT(level);
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+void intel_ddi_clk_select(struct intel_encoder *encoder,
+			  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
-	int type = intel_encoder->type;
-	int hdmi_level;
-
-	if (type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		intel_edp_panel_on(intel_dp);
-	}
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = intel_ddi_get_encoder_port(encoder);
 
-	if (IS_SKYLAKE(dev)) {
-		uint32_t dpll = crtc->config->ddi_pll_sel;
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		uint32_t dpll = pipe_config->ddi_pll_sel;
 		uint32_t val;
 
 		/*
 		 * DPLL0 is used for eDP and is the only "private" DPLL (as
 		 * opposed to shared) on SKL
 		 */
-		if (type == INTEL_OUTPUT_EDP) {
+		if (encoder->type == INTEL_OUTPUT_EDP) {
 			WARN_ON(dpll != SKL_DPLL0);
 
 			val = I915_READ(DPLL_CTRL1);
@@ -2290,7 +2294,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 			val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
 				 DPLL_CTRL1_SSC(dpll) |
 				 DPLL_CTRL1_LINK_RATE_MASK(dpll));
-			val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
+			val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
 
 			I915_WRITE(DPLL_CTRL1, val);
 			POSTING_READ(DPLL_CTRL1);
@@ -2306,11 +2310,29 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
 		I915_WRITE(DPLL_CTRL2, val);
 
-	} else if (INTEL_INFO(dev)->gen < 9) {
-		WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
-		I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
+	} else if (INTEL_INFO(dev_priv)->gen < 9) {
+		WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
+		I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
+	}
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	int hdmi_level;
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		intel_edp_panel_on(intel_dp);
 	}
 
+	intel_ddi_clk_select(intel_encoder, crtc->config);
+
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
@@ -2369,7 +2391,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 		intel_edp_panel_off(intel_dp);
 	}
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
 					DPLL_CTRL2_DDI_CLK_OFF(port)));
 	else if (INTEL_INFO(dev)->gen < 9)
@@ -2437,7 +2459,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
@@ -2445,9 +2467,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	udelay(20);
 }
 
-static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
 	uint32_t val;
 
 	val = I915_READ(WRPLL_CTL(pll->id));
@@ -2455,9 +2485,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	POSTING_READ(WRPLL_CTL(pll->id));
 }
 
-static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-				     struct intel_shared_dpll *pll,
-				     struct intel_dpll_hw_state *hw_state)
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
 {
 	uint32_t val;
 
@@ -2470,25 +2510,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & WRPLL_PLL_ENABLE;
 }
 
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+
 static const char * const hsw_ddi_pll_names[] = {
 	"WRPLL 1",
 	"WRPLL 2",
+	"SPLL"
 };
 
 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
 {
 	int i;
 
-	dev_priv->num_shared_dpll = 2;
+	dev_priv->num_shared_dpll = 3;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < 2; i++) {
 		dev_priv->shared_dplls[i].id = i;
 		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
 		dev_priv->shared_dplls[i].get_hw_state =
-			hsw_ddi_pll_get_hw_state;
+			hsw_ddi_wrpll_get_hw_state;
 	}
+
+	/* SPLL is special, but needs to be initialized anyway.. */
+	dev_priv->shared_dplls[i].id = i;
+	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+
 }
 
 static const char * const skl_ddi_pll_names[] = {
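
hsw_shared_dplls_init() above grows the descriptor table from two WRPLLs to three entries, with SPLL carrying its own enable/disable/readout hooks. A rough sketch of that table-of-ops shape, under heavily simplified types (the real intel_shared_dpll carries far more state):

	struct pll_ops {
		void (*enable)(int id);
		void (*disable)(int id);
	};

	struct pll { int id; const char *name; const struct pll_ops *ops; };

	static void plls_init(struct pll *plls, const struct pll_ops *wrpll_ops,
			      const struct pll_ops *spll_ops)
	{
		int i;

		for (i = 0; i < 2; i++) {	/* WRPLL 1 and WRPLL 2 */
			plls[i].id = i;
			plls[i].ops = wrpll_ops;
		}
		plls[i].id = i;			/* SPLL takes the third, special slot */
		plls[i].ops = spll_ops;
	}
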
@@ -2498,7 +2563,7 @@ static const char * const skl_ddi_pll_names[] = {
 };
 
 struct skl_dpll_regs {
-	u32 ctl, cfgcr1, cfgcr2;
+	i915_reg_t ctl, cfgcr1, cfgcr2;
 };
 
 /* this array is indexed by the *shared* pll id */
@@ -2511,13 +2576,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
 	},
 	{
 		/* DPLL 2 */
-		.ctl = WRPLL_CTL1,
+		.ctl = WRPLL_CTL(0),
 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
 	},
 	{
 		/* DPLL 3 */
-		.ctl = WRPLL_CTL2,
+		.ctl = WRPLL_CTL(1),
 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
 	},
@@ -2937,22 +3002,22 @@ void intel_ddi_pll_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 		skl_shared_dplls_init(dev_priv);
 	else if (IS_BROXTON(dev))
 		bxt_shared_dplls_init(dev_priv);
 	else
 		hsw_shared_dplls_init(dev_priv);
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		int cdclk_freq;
 
 		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 		dev_priv->skl_boot_cdclk = cdclk_freq;
+		if (skl_sanitize_cdclk(dev_priv))
+			DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
 		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
 			DRM_ERROR("LCPLL1 is disabled\n");
-		else
-			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 	} else if (IS_BROXTON(dev)) {
 		broxton_init_cdclk(dev);
 		broxton_ddi_phy_init(dev);
@@ -2971,11 +3036,11 @@ void intel_ddi_pll_init(struct drm_device *dev)
 	}
 }
 
-void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_dig_port->base.base.dev);
 	enum port port = intel_dig_port->port;
 	uint32_t val;
 	bool wait = false;
@@ -3086,7 +3151,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->has_hdmi_sink = true;
 		intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
-		if (intel_hdmi->infoframe_enabled(&encoder->base))
+		if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 			pipe_config->has_infoframe = true;
 		break;
 	case TRANS_DDI_MODE_SELECT_DVI:
@@ -3219,7 +3284,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	encoder = &intel_encoder->base;
 
 	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);
 
 	intel_encoder->compute_config = intel_ddi_compute_config;
 	intel_encoder->enable = intel_enable_ddi;
@@ -3234,6 +3299,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 						  (DDI_BUF_PORT_REVERSAL |
 						   DDI_A_4_LANES);
 
+	/*
+	 * Bspec says that DDI_A_4_LANES is the only supported configuration
+	 * for Broxton.  Yet some BIOS fail to set this bit on port A if eDP
+	 * wasn't lit up at boot.  Force this bit on in our internal
+	 * configuration so that we use the proper lane count for our
+	 * calculations.
+	 */
+	if (IS_BROXTON(dev) && port == PORT_A) {
+		if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
+			DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
+			intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
+		}
+	}
+
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
@@ -3247,8 +3326,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 	 * interrupts to check the external panel connection.
 	 */
-	if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
-	    && port == PORT_B)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
 		dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
 	else
 		dev_priv->hotplug.irq_port[port] = intel_dig_port;
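
The intel_display.c hunks that follow are dominated by one mechanical conversion: register variables change from plain u32/int offsets to i915_reg_t. Wrapping the offset in a one-member struct makes the compiler reject code that mixes raw integers with register handles. A simplified sketch of the idea (the real definitions live in i915_reg.h; the valid-check convention shown here is an assumption):

	#include <linux/types.h>

	typedef struct {
		u32 reg;
	} i915_reg_t;

	#define _MMIO(r) ((i915_reg_t){ .reg = (r) })

	static inline u32 i915_mmio_reg_offset(i915_reg_t r)
	{
		return r.reg;
	}

	static inline bool i915_mmio_reg_valid(i915_reg_t r)
	{
		return r.reg != 0;	/* offset 0 doubles as "no register" */
	}
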
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f62ffc04c21d..bda6b9c82e66 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -44,6 +44,8 @@
44#include <drm/drm_plane_helper.h> 44#include <drm/drm_plane_helper.h>
45#include <drm/drm_rect.h> 45#include <drm/drm_rect.h>
46#include <linux/dma_remapping.h> 46#include <linux/dma_remapping.h>
47#include <linux/reservation.h>
48#include <linux/dma-buf.h>
47 49
48/* Primary plane formats for gen <= 3 */ 50/* Primary plane formats for gen <= 3 */
49static const uint32_t i8xx_primary_formats[] = { 51static const uint32_t i8xx_primary_formats[] = {
@@ -1095,7 +1097,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1095static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 1097static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1096{ 1098{
1097 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
1098 u32 reg = PIPEDSL(pipe); 1100 i915_reg_t reg = PIPEDSL(pipe);
1099 u32 line1, line2; 1101 u32 line1, line2;
1100 u32 line_mask; 1102 u32 line_mask;
1101 1103
@@ -1135,7 +1137,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1135 enum pipe pipe = crtc->pipe; 1137 enum pipe pipe = crtc->pipe;
1136 1138
1137 if (INTEL_INFO(dev)->gen >= 4) { 1139 if (INTEL_INFO(dev)->gen >= 4) {
1138 int reg = PIPECONF(cpu_transcoder); 1140 i915_reg_t reg = PIPECONF(cpu_transcoder);
1139 1141
1140 /* Wait for the Pipe State to go off */ 1142 /* Wait for the Pipe State to go off */
1141 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1143 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1285,7 +1287,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1285 enum pipe pipe) 1287 enum pipe pipe)
1286{ 1288{
1287 struct drm_device *dev = dev_priv->dev; 1289 struct drm_device *dev = dev_priv->dev;
1288 int pp_reg; 1290 i915_reg_t pp_reg;
1289 u32 val; 1291 u32 val;
1290 enum pipe panel_pipe = PIPE_A; 1292 enum pipe panel_pipe = PIPE_A;
1291 bool locked = true; 1293 bool locked = true;
@@ -1480,8 +1482,7 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1480 return false; 1482 return false;
1481 1483
1482 if (HAS_PCH_CPT(dev_priv->dev)) { 1484 if (HAS_PCH_CPT(dev_priv->dev)) {
1483 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1485 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1484 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1485 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1486 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1486 return false; 1487 return false;
1487 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1488 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
@@ -1545,12 +1546,13 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1545} 1546}
1546 1547
1547static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1548static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1548 enum pipe pipe, int reg, u32 port_sel) 1549 enum pipe pipe, i915_reg_t reg,
1550 u32 port_sel)
1549{ 1551{
1550 u32 val = I915_READ(reg); 1552 u32 val = I915_READ(reg);
1551 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1553 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1552 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1554 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1553 reg, pipe_name(pipe)); 1555 i915_mmio_reg_offset(reg), pipe_name(pipe));
1554 1556
1555 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1557 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1556 && (val & DP_PIPEB_SELECT), 1558 && (val & DP_PIPEB_SELECT),
@@ -1558,12 +1560,12 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1558} 1560}
1559 1561
1560static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1562static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1561 enum pipe pipe, int reg) 1563 enum pipe pipe, i915_reg_t reg)
1562{ 1564{
1563 u32 val = I915_READ(reg); 1565 u32 val = I915_READ(reg);
1564 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1566 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1565 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1567 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1566 reg, pipe_name(pipe)); 1568 i915_mmio_reg_offset(reg), pipe_name(pipe));
1567 1569
1568 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1570 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1569 && (val & SDVO_PIPE_B_SELECT), 1571 && (val & SDVO_PIPE_B_SELECT),
@@ -1599,7 +1601,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
1599{ 1601{
1600 struct drm_device *dev = crtc->base.dev; 1602 struct drm_device *dev = crtc->base.dev;
1601 struct drm_i915_private *dev_priv = dev->dev_private; 1603 struct drm_i915_private *dev_priv = dev->dev_private;
1602 int reg = DPLL(crtc->pipe); 1604 i915_reg_t reg = DPLL(crtc->pipe);
1603 u32 dpll = pipe_config->dpll_hw_state.dpll; 1605 u32 dpll = pipe_config->dpll_hw_state.dpll;
1604 1606
1605 assert_pipe_disabled(dev_priv, crtc->pipe); 1607 assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1688,7 +1690,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1688{ 1690{
1689 struct drm_device *dev = crtc->base.dev; 1691 struct drm_device *dev = crtc->base.dev;
1690 struct drm_i915_private *dev_priv = dev->dev_private; 1692 struct drm_i915_private *dev_priv = dev->dev_private;
1691 int reg = DPLL(crtc->pipe); 1693 i915_reg_t reg = DPLL(crtc->pipe);
1692 u32 dpll = crtc->config->dpll_hw_state.dpll; 1694 u32 dpll = crtc->config->dpll_hw_state.dpll;
1693 1695
1694 assert_pipe_disabled(dev_priv, crtc->pipe); 1696 assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1837,7 +1839,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1837 unsigned int expected_mask) 1839 unsigned int expected_mask)
1838{ 1840{
1839 u32 port_mask; 1841 u32 port_mask;
1840 int dpll_reg; 1842 i915_reg_t dpll_reg;
1841 1843
1842 switch (dport->port) { 1844 switch (dport->port) {
1843 case PORT_B: 1845 case PORT_B:
@@ -1962,7 +1964,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1962 struct drm_device *dev = dev_priv->dev; 1964 struct drm_device *dev = dev_priv->dev;
1963 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1965 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1964 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1965 uint32_t reg, val, pipeconf_val; 1967 i915_reg_t reg;
1968 uint32_t val, pipeconf_val;
1966 1969
1967 /* PCH only available on ILK+ */ 1970 /* PCH only available on ILK+ */
1968 BUG_ON(!HAS_PCH_SPLIT(dev)); 1971 BUG_ON(!HAS_PCH_SPLIT(dev));
@@ -2051,7 +2054,8 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2051 enum pipe pipe) 2054 enum pipe pipe)
2052{ 2055{
2053 struct drm_device *dev = dev_priv->dev; 2056 struct drm_device *dev = dev_priv->dev;
2054 uint32_t reg, val; 2057 i915_reg_t reg;
2058 uint32_t val;
2055 2059
2056 /* FDI relies on the transcoder */ 2060 /* FDI relies on the transcoder */
2057 assert_fdi_tx_disabled(dev_priv, pipe); 2061 assert_fdi_tx_disabled(dev_priv, pipe);
@@ -2068,7 +2072,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2068 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 2072 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2069 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 2073 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2070 2074
2071 if (!HAS_PCH_IBX(dev)) { 2075 if (HAS_PCH_CPT(dev)) {
2072 /* Workaround: Clear the timing override chicken bit again. */ 2076 /* Workaround: Clear the timing override chicken bit again. */
2073 reg = TRANS_CHICKEN2(pipe); 2077 reg = TRANS_CHICKEN2(pipe);
2074 val = I915_READ(reg); 2078 val = I915_READ(reg);
@@ -2106,10 +2110,9 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2106 struct drm_device *dev = crtc->base.dev; 2110 struct drm_device *dev = crtc->base.dev;
2107 struct drm_i915_private *dev_priv = dev->dev_private; 2111 struct drm_i915_private *dev_priv = dev->dev_private;
2108 enum pipe pipe = crtc->pipe; 2112 enum pipe pipe = crtc->pipe;
2109 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2113 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2110 pipe);
2111 enum pipe pch_transcoder; 2114 enum pipe pch_transcoder;
2112 int reg; 2115 i915_reg_t reg;
2113 u32 val; 2116 u32 val;
2114 2117
2115 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 2118 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
@@ -2129,7 +2132,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2129 * need the check. 2132 * need the check.
2130 */ 2133 */
2131 if (HAS_GMCH_DISPLAY(dev_priv->dev)) 2134 if (HAS_GMCH_DISPLAY(dev_priv->dev))
2132 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 2135 if (crtc->config->has_dsi_encoder)
2133 assert_dsi_pll_enabled(dev_priv); 2136 assert_dsi_pll_enabled(dev_priv);
2134 else 2137 else
2135 assert_pll_enabled(dev_priv, pipe); 2138 assert_pll_enabled(dev_priv, pipe);
@@ -2170,7 +2173,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
2170 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2173 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2171 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2174 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2172 enum pipe pipe = crtc->pipe; 2175 enum pipe pipe = crtc->pipe;
2173 int reg; 2176 i915_reg_t reg;
2174 u32 val; 2177 u32 val;
2175 2178
2176 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 2179 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
@@ -2269,20 +2272,20 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
2269 fb_format_modifier, 0)); 2272 fb_format_modifier, 0));
2270} 2273}
2271 2274
2272static int 2275static void
2273intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, 2276intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2274 const struct drm_plane_state *plane_state) 2277 const struct drm_plane_state *plane_state)
2275{ 2278{
2276 struct intel_rotation_info *info = &view->rotation_info; 2279 struct intel_rotation_info *info = &view->params.rotation_info;
2277 unsigned int tile_height, tile_pitch; 2280 unsigned int tile_height, tile_pitch;
2278 2281
2279 *view = i915_ggtt_view_normal; 2282 *view = i915_ggtt_view_normal;
2280 2283
2281 if (!plane_state) 2284 if (!plane_state)
2282 return 0; 2285 return;
2283 2286
2284 if (!intel_rotation_90_or_270(plane_state->rotation)) 2287 if (!intel_rotation_90_or_270(plane_state->rotation))
2285 return 0; 2288 return;
2286 2289
2287 *view = i915_ggtt_view_rotated; 2290 *view = i915_ggtt_view_rotated;
2288 2291
@@ -2309,8 +2312,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2309 info->size_uv = info->width_pages_uv * info->height_pages_uv * 2312 info->size_uv = info->width_pages_uv * info->height_pages_uv *
2310 PAGE_SIZE; 2313 PAGE_SIZE;
2311 } 2314 }
2312
2313 return 0;
2314} 2315}
2315 2316
2316static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) 2317static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
@@ -2329,9 +2330,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2329int 2330int
2330intel_pin_and_fence_fb_obj(struct drm_plane *plane, 2331intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2331 struct drm_framebuffer *fb, 2332 struct drm_framebuffer *fb,
2332 const struct drm_plane_state *plane_state, 2333 const struct drm_plane_state *plane_state)
2333 struct intel_engine_cs *pipelined,
2334 struct drm_i915_gem_request **pipelined_request)
2335{ 2334{
2336 struct drm_device *dev = fb->dev; 2335 struct drm_device *dev = fb->dev;
2337 struct drm_i915_private *dev_priv = dev->dev_private; 2336 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2366,9 +2365,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2366 return -EINVAL; 2365 return -EINVAL;
2367 } 2366 }
2368 2367
2369 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2368 intel_fill_fb_ggtt_view(&view, fb, plane_state);
2370 if (ret)
2371 return ret;
2372 2369
2373 /* Note that the w/a also requires 64 PTE of padding following the 2370 /* Note that the w/a also requires 64 PTE of padding following the
2374 * bo. We currently fill all unused PTE with the shadow page and so 2371 * bo. We currently fill all unused PTE with the shadow page and so
@@ -2387,11 +2384,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2387 */ 2384 */
2388 intel_runtime_pm_get(dev_priv); 2385 intel_runtime_pm_get(dev_priv);
2389 2386
2390 dev_priv->mm.interruptible = false; 2387 ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2391 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, 2388 &view);
2392 pipelined_request, &view);
2393 if (ret) 2389 if (ret)
2394 goto err_interruptible; 2390 goto err_pm;
2395 2391
2396 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2392 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2397 * fence, whereas 965+ only requires a fence if using 2393 * fence, whereas 965+ only requires a fence if using
@@ -2417,14 +2413,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2417 i915_gem_object_pin_fence(obj); 2413 i915_gem_object_pin_fence(obj);
2418 } 2414 }
2419 2415
2420 dev_priv->mm.interruptible = true;
2421 intel_runtime_pm_put(dev_priv); 2416 intel_runtime_pm_put(dev_priv);
2422 return 0; 2417 return 0;
2423 2418
2424err_unpin: 2419err_unpin:
2425 i915_gem_object_unpin_from_display_plane(obj, &view); 2420 i915_gem_object_unpin_from_display_plane(obj, &view);
2426err_interruptible: 2421err_pm:
2427 dev_priv->mm.interruptible = true;
2428 intel_runtime_pm_put(dev_priv); 2422 intel_runtime_pm_put(dev_priv);
2429 return ret; 2423 return ret;
2430} 2424}
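With the mm.interruptible juggling gone, the pin path is a plain acquire/partial-unwind shape: take runtime PM, try to pin, undo in reverse order on failure. A hedged sketch of that goto-unwind pattern, all helpers stubbed and renamed for illustration:

    static void pm_get(void) { }
    static void pm_put(void) { }
    static int pin(void) { return -1; }     /* pretend pinning failed */
    static void unpin(void) { }
    static int fence(void) { return 0; }

    static int pin_and_fence(void)
    {
            int ret;

            pm_get();                       /* like intel_runtime_pm_get() */

            ret = pin();
            if (ret)
                    goto err_pm;            /* nothing pinned yet */

            ret = fence();
            if (ret)
                    goto err_unpin;         /* undo the pin first */

            pm_put();
            return 0;

    err_unpin:
            unpin();
    err_pm:
            pm_put();                       /* mirrors the err_pm label above */
            return ret;
    }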
@@ -2434,12 +2428,10 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2434{ 2428{
2435 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2429 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2436 struct i915_ggtt_view view; 2430 struct i915_ggtt_view view;
2437 int ret;
2438 2431
2439 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); 2432 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2440 2433
2441 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2434 intel_fill_fb_ggtt_view(&view, fb, plane_state);
2442 WARN_ONCE(ret, "Couldn't get view from plane state!");
2443 2435
2444 if (view.type == I915_GGTT_VIEW_NORMAL) 2436 if (view.type == I915_GGTT_VIEW_NORMAL)
2445 i915_gem_object_unpin_fence(obj); 2437 i915_gem_object_unpin_fence(obj);
@@ -2646,11 +2638,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2646 return; 2638 return;
2647 2639
2648valid_fb: 2640valid_fb:
2649 plane_state->src_x = plane_state->src_y = 0; 2641 plane_state->src_x = 0;
2642 plane_state->src_y = 0;
2650 plane_state->src_w = fb->width << 16; 2643 plane_state->src_w = fb->width << 16;
2651 plane_state->src_h = fb->height << 16; 2644 plane_state->src_h = fb->height << 16;
2652 2645
2653 plane_state->crtc_x = plane_state->src_y = 0; 2646 plane_state->crtc_x = 0;
2647 plane_state->crtc_y = 0;
2654 plane_state->crtc_w = fb->width; 2648 plane_state->crtc_w = fb->width;
2655 plane_state->crtc_h = fb->height; 2649 plane_state->crtc_h = fb->height;
2656 2650
@@ -2678,7 +2672,7 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2678 int plane = intel_crtc->plane; 2672 int plane = intel_crtc->plane;
2679 unsigned long linear_offset; 2673 unsigned long linear_offset;
2680 u32 dspcntr; 2674 u32 dspcntr;
2681 u32 reg = DSPCNTR(plane); 2675 i915_reg_t reg = DSPCNTR(plane);
2682 int pixel_size; 2676 int pixel_size;
2683 2677
2684 if (!visible || !fb) { 2678 if (!visible || !fb) {
@@ -2808,7 +2802,7 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2808 int plane = intel_crtc->plane; 2802 int plane = intel_crtc->plane;
2809 unsigned long linear_offset; 2803 unsigned long linear_offset;
2810 u32 dspcntr; 2804 u32 dspcntr;
2811 u32 reg = DSPCNTR(plane); 2805 i915_reg_t reg = DSPCNTR(plane);
2812 int pixel_size; 2806 int pixel_size;
2813 2807
2814 if (!visible || !fb) { 2808 if (!visible || !fb) {
@@ -2933,30 +2927,32 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2933 } 2927 }
2934} 2928}
2935 2929
2936unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, 2930u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2937 struct drm_i915_gem_object *obj, 2931 struct drm_i915_gem_object *obj,
2938 unsigned int plane) 2932 unsigned int plane)
2939{ 2933{
2940 const struct i915_ggtt_view *view = &i915_ggtt_view_normal; 2934 struct i915_ggtt_view view;
2941 struct i915_vma *vma; 2935 struct i915_vma *vma;
2942 unsigned char *offset; 2936 u64 offset;
2943 2937
2944 if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) 2938 intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
2945 view = &i915_ggtt_view_rotated; 2939 intel_plane->base.state);
2946 2940
2947 vma = i915_gem_obj_to_ggtt_view(obj, view); 2941 vma = i915_gem_obj_to_ggtt_view(obj, &view);
2948 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", 2942 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2949 view->type)) 2943 view.type))
2950 return -1; 2944 return -1;
2951 2945
2952 offset = (unsigned char *)vma->node.start; 2946 offset = vma->node.start;
2953 2947
2954 if (plane == 1) { 2948 if (plane == 1) {
2955 offset += vma->ggtt_view.rotation_info.uv_start_page * 2949 offset += vma->ggtt_view.params.rotation_info.uv_start_page *
2956 PAGE_SIZE; 2950 PAGE_SIZE;
2957 } 2951 }
2958 2952
2959 return (unsigned long)offset; 2953 WARN_ON(upper_32_bits(offset));
2954
2955 return lower_32_bits(offset);
2960} 2956}
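intel_plane_obj_offset() now narrows a 64-bit GGTT address to u32 and warns if the top half is non-zero. A self-contained sketch of that split; the two macros are redefined here (mirroring the kernel's definitions) so the snippet compiles on its own:

    #include <assert.h>
    #include <stdint.h>

    #define lower_32_bits(n) ((uint32_t)(n))
    #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

    static uint32_t ggtt_offset_to_u32(uint64_t offset)
    {
            /* display registers take a 32-bit offset; anything above is a bug */
            assert(upper_32_bits(offset) == 0);
            return lower_32_bits(offset);
    }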
2961 2957
2962static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 2958static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -3082,7 +3078,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
3082 u32 tile_height, plane_offset, plane_size; 3078 u32 tile_height, plane_offset, plane_size;
3083 unsigned int rotation; 3079 unsigned int rotation;
3084 int x_offset, y_offset; 3080 int x_offset, y_offset;
3085 unsigned long surf_addr; 3081 u32 surf_addr;
3086 struct intel_crtc_state *crtc_state = intel_crtc->config; 3082 struct intel_crtc_state *crtc_state = intel_crtc->config;
3087 struct intel_plane_state *plane_state; 3083 struct intel_plane_state *plane_state;
3088 int src_x = 0, src_y = 0, src_w = 0, src_h = 0; 3084 int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
@@ -3180,8 +3176,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3180 struct drm_device *dev = crtc->dev; 3176 struct drm_device *dev = crtc->dev;
3181 struct drm_i915_private *dev_priv = dev->dev_private; 3177 struct drm_i915_private *dev_priv = dev->dev_private;
3182 3178
3183 if (dev_priv->fbc.disable_fbc) 3179 if (dev_priv->fbc.deactivate)
3184 dev_priv->fbc.disable_fbc(dev_priv); 3180 dev_priv->fbc.deactivate(dev_priv);
3185 3181
3186 dev_priv->display.update_primary_plane(crtc, fb, x, y); 3182 dev_priv->display.update_primary_plane(crtc, fb, x, y);
3187 3183
@@ -3210,10 +3206,9 @@ static void intel_update_primary_planes(struct drm_device *dev)
3210 struct intel_plane_state *plane_state; 3206 struct intel_plane_state *plane_state;
3211 3207
3212 drm_modeset_lock_crtc(crtc, &plane->base); 3208 drm_modeset_lock_crtc(crtc, &plane->base);
3213
3214 plane_state = to_intel_plane_state(plane->base.state); 3209 plane_state = to_intel_plane_state(plane->base.state);
3215 3210
3216 if (plane_state->base.fb) 3211 if (crtc->state->active && plane_state->base.fb)
3217 plane->commit_plane(&plane->base, plane_state); 3212 plane->commit_plane(&plane->base, plane_state);
3218 3213
3219 drm_modeset_unlock_crtc(crtc); 3214 drm_modeset_unlock_crtc(crtc);
@@ -3289,32 +3284,6 @@ void intel_finish_reset(struct drm_device *dev)
3289 drm_modeset_unlock_all(dev); 3284 drm_modeset_unlock_all(dev);
3290} 3285}
3291 3286
3292static void
3293intel_finish_fb(struct drm_framebuffer *old_fb)
3294{
3295 struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3296 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3297 bool was_interruptible = dev_priv->mm.interruptible;
3298 int ret;
3299
3300 /* Big Hammer, we also need to ensure that any pending
3301 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
3302 * current scanout is retired before unpinning the old
3303 * framebuffer. Note that we rely on userspace rendering
3304 * into the buffer attached to the pipe they are waiting
3305 * on. If not, userspace generates a GPU hang with IPEHR
3306 * pointing to the MI_WAIT_FOR_EVENT.
3307 *
3308 * This should only fail upon a hung GPU, in which case we
3309 * can safely continue.
3310 */
3311 dev_priv->mm.interruptible = false;
3312 ret = i915_gem_object_wait_rendering(obj, true);
3313 dev_priv->mm.interruptible = was_interruptible;
3314
3315 WARN_ON(ret);
3316}
3317
3318static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3287static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3319{ 3288{
3320 struct drm_device *dev = crtc->dev; 3289 struct drm_device *dev = crtc->dev;
@@ -3384,7 +3353,8 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
3384 struct drm_i915_private *dev_priv = dev->dev_private; 3353 struct drm_i915_private *dev_priv = dev->dev_private;
3385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3354 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3386 int pipe = intel_crtc->pipe; 3355 int pipe = intel_crtc->pipe;
3387 u32 reg, temp; 3356 i915_reg_t reg;
3357 u32 temp;
3388 3358
3389 /* enable normal train */ 3359 /* enable normal train */
3390 reg = FDI_TX_CTL(pipe); 3360 reg = FDI_TX_CTL(pipe);
@@ -3426,7 +3396,8 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3426 struct drm_i915_private *dev_priv = dev->dev_private; 3396 struct drm_i915_private *dev_priv = dev->dev_private;
3427 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3397 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3428 int pipe = intel_crtc->pipe; 3398 int pipe = intel_crtc->pipe;
3429 u32 reg, temp, tries; 3399 i915_reg_t reg;
3400 u32 temp, tries;
3430 3401
3431 /* FDI needs bits from pipe first */ 3402 /* FDI needs bits from pipe first */
3432 assert_pipe_enabled(dev_priv, pipe); 3403 assert_pipe_enabled(dev_priv, pipe);
@@ -3526,7 +3497,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
3526 struct drm_i915_private *dev_priv = dev->dev_private; 3497 struct drm_i915_private *dev_priv = dev->dev_private;
3527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3498 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3528 int pipe = intel_crtc->pipe; 3499 int pipe = intel_crtc->pipe;
3529 u32 reg, temp, i, retry; 3500 i915_reg_t reg;
3501 u32 temp, i, retry;
3530 3502
3531 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 3503 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3532 for train result */ 3504 for train result */
@@ -3658,7 +3630,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3658 struct drm_i915_private *dev_priv = dev->dev_private; 3630 struct drm_i915_private *dev_priv = dev->dev_private;
3659 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3631 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3660 int pipe = intel_crtc->pipe; 3632 int pipe = intel_crtc->pipe;
3661 u32 reg, temp, i, j; 3633 i915_reg_t reg;
3634 u32 temp, i, j;
3662 3635
3663 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 3636 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3664 for train result */ 3637 for train result */
@@ -3775,8 +3748,8 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3775 struct drm_device *dev = intel_crtc->base.dev; 3748 struct drm_device *dev = intel_crtc->base.dev;
3776 struct drm_i915_private *dev_priv = dev->dev_private; 3749 struct drm_i915_private *dev_priv = dev->dev_private;
3777 int pipe = intel_crtc->pipe; 3750 int pipe = intel_crtc->pipe;
3778 u32 reg, temp; 3751 i915_reg_t reg;
3779 3752 u32 temp;
3780 3753
3781 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 3754 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3782 reg = FDI_RX_CTL(pipe); 3755 reg = FDI_RX_CTL(pipe);
@@ -3812,7 +3785,8 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3812 struct drm_device *dev = intel_crtc->base.dev; 3785 struct drm_device *dev = intel_crtc->base.dev;
3813 struct drm_i915_private *dev_priv = dev->dev_private; 3786 struct drm_i915_private *dev_priv = dev->dev_private;
3814 int pipe = intel_crtc->pipe; 3787 int pipe = intel_crtc->pipe;
3815 u32 reg, temp; 3788 i915_reg_t reg;
3789 u32 temp;
3816 3790
3817 /* Switch from PCDclk to Rawclk */ 3791 /* Switch from PCDclk to Rawclk */
3818 reg = FDI_RX_CTL(pipe); 3792 reg = FDI_RX_CTL(pipe);
@@ -3842,7 +3816,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
3842 struct drm_i915_private *dev_priv = dev->dev_private; 3816 struct drm_i915_private *dev_priv = dev->dev_private;
3843 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3817 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3844 int pipe = intel_crtc->pipe; 3818 int pipe = intel_crtc->pipe;
3845 u32 reg, temp; 3819 i915_reg_t reg;
3820 u32 temp;
3846 3821
3847 /* disable CPU FDI tx and PCH FDI rx */ 3822 /* disable CPU FDI tx and PCH FDI rx */
3848 reg = FDI_TX_CTL(pipe); 3823 reg = FDI_TX_CTL(pipe);
@@ -3935,15 +3910,23 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
3935 work->pending_flip_obj); 3910 work->pending_flip_obj);
3936} 3911}
3937 3912
3938void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3913static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3939{ 3914{
3940 struct drm_device *dev = crtc->dev; 3915 struct drm_device *dev = crtc->dev;
3941 struct drm_i915_private *dev_priv = dev->dev_private; 3916 struct drm_i915_private *dev_priv = dev->dev_private;
3917 long ret;
3942 3918
3943 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3919 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3944 if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, 3920
3945 !intel_crtc_has_pending_flip(crtc), 3921 ret = wait_event_interruptible_timeout(
3946 60*HZ) == 0)) { 3922 dev_priv->pending_flip_queue,
3923 !intel_crtc_has_pending_flip(crtc),
3924 60*HZ);
3925
3926 if (ret < 0)
3927 return ret;
3928
3929 if (ret == 0) {
3947 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3930 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3948 3931
3949 spin_lock_irq(&dev->event_lock); 3932 spin_lock_irq(&dev->event_lock);
@@ -3954,11 +3937,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3954 spin_unlock_irq(&dev->event_lock); 3937 spin_unlock_irq(&dev->event_lock);
3955 } 3938 }
3956 3939
3957 if (crtc->primary->fb) { 3940 return 0;
3958 mutex_lock(&dev->struct_mutex);
3959 intel_finish_fb(crtc->primary->fb);
3960 mutex_unlock(&dev->struct_mutex);
3961 }
3962} 3941}
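The flip wait switches from wait_event_timeout() to wait_event_interruptible_timeout(), whose return value is three-way: negative when a signal arrived, zero on timeout, positive on success. A sketch of handling that contract, with the wait itself and the timeout recovery stubbed out:

    static long do_wait(void) { return 1; }         /* stand-in for the wait macro */
    static void complete_stuck_flip(void) { }       /* stand-in for the timeout path */

    static int wait_for_pending_flips(void)
    {
            long ret = do_wait();   /* <0: interrupted, 0: timed out, >0: done */

            if (ret < 0)
                    return (int)ret;        /* let the signal reach userspace */

            if (ret == 0)
                    complete_stuck_flip();  /* 60s elapsed: kick the stuck flip */

            return 0;
    }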
3963 3942
3964/* Program iCLKIP clock to the desired frequency */ 3943/* Program iCLKIP clock to the desired frequency */
@@ -4118,6 +4097,22 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4118 } 4097 }
4119} 4098}
4120 4099
4100/* Return which DP Port should be selected for Transcoder DP control */
4101static enum port
4102intel_trans_dp_port_sel(struct drm_crtc *crtc)
4103{
4104 struct drm_device *dev = crtc->dev;
4105 struct intel_encoder *encoder;
4106
4107 for_each_encoder_on_crtc(dev, crtc, encoder) {
4108 if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4109 encoder->type == INTEL_OUTPUT_EDP)
4110 return enc_to_dig_port(&encoder->base)->port;
4111 }
4112
4113 return -1;
4114}
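The new helper returns the enum port of the first DP/eDP encoder on the CRTC; the TRANS_DP_CTL code further down then maps that port to register bits. A standalone sketch of such a port-to-bits mapping; the SEL_* encodings are invented for the sketch and are not the real TRANS_DP_PORT_SEL_* values:

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_NONE = -1 };

    #define SEL_B (0u << 29)    /* placeholder encodings only */
    #define SEL_C (1u << 29)
    #define SEL_D (2u << 29)

    static unsigned int port_sel_bits(enum port p)
    {
            switch (p) {
            case PORT_B: return SEL_B;
            case PORT_C: return SEL_C;
            case PORT_D: return SEL_D;
            default:     return 0;  /* no DP/eDP encoder found on the crtc */
            }
    }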
4115
4121/* 4116/*
4122 * Enable PCH resources required for PCH ports: 4117 * Enable PCH resources required for PCH ports:
4123 * - PCH PLLs 4118 * - PCH PLLs
@@ -4132,7 +4127,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4132 struct drm_i915_private *dev_priv = dev->dev_private; 4127 struct drm_i915_private *dev_priv = dev->dev_private;
4133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4128 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4134 int pipe = intel_crtc->pipe; 4129 int pipe = intel_crtc->pipe;
4135 u32 reg, temp; 4130 u32 temp;
4136 4131
4137 assert_pch_transcoder_disabled(dev_priv, pipe); 4132 assert_pch_transcoder_disabled(dev_priv, pipe);
4138 4133
@@ -4144,6 +4139,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4144 I915_WRITE(FDI_RX_TUSIZE1(pipe), 4139 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4145 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 4140 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4146 4141
4142 /*
4143 * Sometimes spurious CPU pipe underruns happen during FDI
4144 * training, at least with VGA+HDMI cloning. Suppress them.
4145 */
4146 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4147
4147 /* For PCH output, training FDI link */ 4148 /* For PCH output, training FDI link */
4148 dev_priv->display.fdi_link_train(crtc); 4149 dev_priv->display.fdi_link_train(crtc);
4149 4150
@@ -4177,10 +4178,14 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4177 4178
4178 intel_fdi_normal_train(crtc); 4179 intel_fdi_normal_train(crtc);
4179 4180
4181 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4182
4180 /* For PCH DP, enable TRANS_DP_CTL */ 4183 /* For PCH DP, enable TRANS_DP_CTL */
4181 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { 4184 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4185 const struct drm_display_mode *adjusted_mode =
4186 &intel_crtc->config->base.adjusted_mode;
4182 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4187 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4183 reg = TRANS_DP_CTL(pipe); 4188 i915_reg_t reg = TRANS_DP_CTL(pipe);
4184 temp = I915_READ(reg); 4189 temp = I915_READ(reg);
4185 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4190 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4186 TRANS_DP_SYNC_MASK | 4191 TRANS_DP_SYNC_MASK |
@@ -4188,19 +4193,19 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4188 temp |= TRANS_DP_OUTPUT_ENABLE; 4193 temp |= TRANS_DP_OUTPUT_ENABLE;
4189 temp |= bpc << 9; /* same format but at 11:9 */ 4194 temp |= bpc << 9; /* same format but at 11:9 */
4190 4195
4191 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 4196 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4192 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4197 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4193 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 4198 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4194 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4199 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4195 4200
4196 switch (intel_trans_dp_port_sel(crtc)) { 4201 switch (intel_trans_dp_port_sel(crtc)) {
4197 case PCH_DP_B: 4202 case PORT_B:
4198 temp |= TRANS_DP_PORT_SEL_B; 4203 temp |= TRANS_DP_PORT_SEL_B;
4199 break; 4204 break;
4200 case PCH_DP_C: 4205 case PORT_C:
4201 temp |= TRANS_DP_PORT_SEL_C; 4206 temp |= TRANS_DP_PORT_SEL_C;
4202 break; 4207 break;
4203 case PCH_DP_D: 4208 case PORT_D:
4204 temp |= TRANS_DP_PORT_SEL_D; 4209 temp |= TRANS_DP_PORT_SEL_D;
4205 break; 4210 break;
4206 default: 4211 default:
@@ -4237,6 +4242,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4237 struct intel_shared_dpll *pll; 4242 struct intel_shared_dpll *pll;
4238 struct intel_shared_dpll_config *shared_dpll; 4243 struct intel_shared_dpll_config *shared_dpll;
4239 enum intel_dpll_id i; 4244 enum intel_dpll_id i;
4245 int max = dev_priv->num_shared_dpll;
4240 4246
4241 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 4247 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4242 4248
@@ -4271,9 +4277,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4271 WARN_ON(shared_dpll[i].crtc_mask); 4277 WARN_ON(shared_dpll[i].crtc_mask);
4272 4278
4273 goto found; 4279 goto found;
4274 } 4280 } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4281 /* Do not consider SPLL */
4282 max = 2;
4275 4283
4276 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4284 for (i = 0; i < max; i++) {
4277 pll = &dev_priv->shared_dplls[i]; 4285 pll = &dev_priv->shared_dplls[i];
4278 4286
4279 /* Only want to check enabled timings first */ 4287 /* Only want to check enabled timings first */
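The `max = 2` clamp above means that on pre-gen9 DDI platforms the search considers only the first two shared DPLL slots (the WRPLLs) and never hands out the SPLL. A standalone sketch of a clamped free-slot search under that assumption (array layout invented for the sketch):

    #include <stddef.h>

    struct pll { int crtc_mask; };

    /* skip_spll mirrors "gen < 9 && HAS_DDI": slot 2 would be the SPLL */
    static struct pll *find_free_pll(struct pll *plls, int num, int skip_spll)
    {
            int max = (skip_spll && num > 2) ? 2 : num;
            int i;

            for (i = 0; i < max; i++)
                    if (plls[i].crtc_mask == 0)
                            return &plls[i];

            return NULL;
    }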
@@ -4337,7 +4345,7 @@ static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4337static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4345static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4338{ 4346{
4339 struct drm_i915_private *dev_priv = dev->dev_private; 4347 struct drm_i915_private *dev_priv = dev->dev_private;
4340 int dslreg = PIPEDSL(pipe); 4348 i915_reg_t dslreg = PIPEDSL(pipe);
4341 u32 temp; 4349 u32 temp;
4342 4350
4343 temp = I915_READ(dslreg); 4351 temp = I915_READ(dslreg);
@@ -4630,7 +4638,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4630 return; 4638 return;
4631 4639
4632 if (HAS_GMCH_DISPLAY(dev_priv->dev)) { 4640 if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
4633 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) 4641 if (intel_crtc->config->has_dsi_encoder)
4634 assert_dsi_pll_enabled(dev_priv); 4642 assert_dsi_pll_enabled(dev_priv);
4635 else 4643 else
4636 assert_pll_enabled(dev_priv, pipe); 4644 assert_pll_enabled(dev_priv, pipe);
@@ -4647,7 +4655,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4647 } 4655 }
4648 4656
4649 for (i = 0; i < 256; i++) { 4657 for (i = 0; i < 256; i++) {
4650 u32 palreg; 4658 i915_reg_t palreg;
4651 4659
4652 if (HAS_GMCH_DISPLAY(dev)) 4660 if (HAS_GMCH_DISPLAY(dev))
4653 palreg = PALETTE(pipe, i); 4661 palreg = PALETTE(pipe, i);
@@ -4726,9 +4734,9 @@ intel_post_enable_primary(struct drm_crtc *crtc)
4726 if (IS_GEN2(dev)) 4734 if (IS_GEN2(dev))
4727 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4735 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4728 4736
4729 /* Underruns don't raise interrupts, so check manually. */ 4737 /* Underruns don't always raise interrupts, so check manually. */
4730 if (HAS_GMCH_DISPLAY(dev)) 4738 intel_check_cpu_fifo_underruns(dev_priv);
4731 i9xx_check_fifo_underruns(dev_priv); 4739 intel_check_pch_fifo_underruns(dev_priv);
4732} 4740}
4733 4741
4734/** 4742/**
@@ -4786,8 +4794,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
4786{ 4794{
4787 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4795 struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4788 struct drm_device *dev = crtc->base.dev; 4796 struct drm_device *dev = crtc->base.dev;
4789 struct drm_i915_private *dev_priv = dev->dev_private;
4790 struct drm_plane *plane;
4791 4797
4792 if (atomic->wait_vblank) 4798 if (atomic->wait_vblank)
4793 intel_wait_for_vblank(dev, crtc->pipe); 4799 intel_wait_for_vblank(dev, crtc->pipe);
@@ -4801,15 +4807,11 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
4801 intel_update_watermarks(&crtc->base); 4807 intel_update_watermarks(&crtc->base);
4802 4808
4803 if (atomic->update_fbc) 4809 if (atomic->update_fbc)
4804 intel_fbc_update(dev_priv); 4810 intel_fbc_update(crtc);
4805 4811
4806 if (atomic->post_enable_primary) 4812 if (atomic->post_enable_primary)
4807 intel_post_enable_primary(&crtc->base); 4813 intel_post_enable_primary(&crtc->base);
4808 4814
4809 drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
4810 intel_update_sprite_watermarks(plane, &crtc->base,
4811 0, 0, 0, false, false);
4812
4813 memset(atomic, 0, sizeof(*atomic)); 4815 memset(atomic, 0, sizeof(*atomic));
4814} 4816}
4815 4817
@@ -4818,23 +4820,9 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
4818 struct drm_device *dev = crtc->base.dev; 4820 struct drm_device *dev = crtc->base.dev;
4819 struct drm_i915_private *dev_priv = dev->dev_private; 4821 struct drm_i915_private *dev_priv = dev->dev_private;
4820 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4822 struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4821 struct drm_plane *p;
4822
4823 /* Track fb's for any planes being disabled */
4824 drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
4825 struct intel_plane *plane = to_intel_plane(p);
4826
4827 mutex_lock(&dev->struct_mutex);
4828 i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
4829 plane->frontbuffer_bit);
4830 mutex_unlock(&dev->struct_mutex);
4831 }
4832
4833 if (atomic->wait_for_flips)
4834 intel_crtc_wait_for_pending_flips(&crtc->base);
4835 4823
4836 if (atomic->disable_fbc) 4824 if (atomic->disable_fbc)
4837 intel_fbc_disable_crtc(crtc); 4825 intel_fbc_deactivate(crtc);
4838 4826
4839 if (crtc->atomic.disable_ips) 4827 if (crtc->atomic.disable_ips)
4840 hsw_disable_ips(crtc); 4828 hsw_disable_ips(crtc);
@@ -4880,6 +4868,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4880 return; 4868 return;
4881 4869
4882 if (intel_crtc->config->has_pch_encoder) 4870 if (intel_crtc->config->has_pch_encoder)
4871 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4872
4873 if (intel_crtc->config->has_pch_encoder)
4883 intel_prepare_shared_dpll(intel_crtc); 4874 intel_prepare_shared_dpll(intel_crtc);
4884 4875
4885 if (intel_crtc->config->has_dp_encoder) 4876 if (intel_crtc->config->has_dp_encoder)
@@ -4897,7 +4888,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4897 intel_crtc->active = true; 4888 intel_crtc->active = true;
4898 4889
4899 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4890 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4900 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4901 4891
4902 for_each_encoder_on_crtc(dev, crtc, encoder) 4892 for_each_encoder_on_crtc(dev, crtc, encoder)
4903 if (encoder->pre_enable) 4893 if (encoder->pre_enable)
@@ -4935,6 +4925,13 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4935 4925
4936 if (HAS_PCH_CPT(dev)) 4926 if (HAS_PCH_CPT(dev))
4937 cpt_verify_modeset(dev, intel_crtc->pipe); 4927 cpt_verify_modeset(dev, intel_crtc->pipe);
4928
4929 /* Must wait for vblank to avoid spurious PCH FIFO underruns */
4930 if (intel_crtc->config->has_pch_encoder)
4931 intel_wait_for_vblank(dev, pipe);
4932 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4933
4934 intel_fbc_enable(intel_crtc);
4938} 4935}
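The PCH enable/disable paths above all follow the same bracket: turn underrun reporting off around the phase known to produce spurious underruns, wait a vblank for the pipe to settle, then turn reporting back on so real underruns are flagged again. A sketch of that bracket with every helper stubbed:

    static void set_underrun_reporting(int enable) { (void)enable; }
    static void train_fdi_link(void) { }
    static void wait_for_vblank(void) { }

    static void enable_pch_port(void)
    {
            set_underrun_reporting(0);  /* spurious underruns expected here */

            train_fdi_link();           /* the noisy part of the modeset */

            wait_for_vblank();          /* let the pipe settle for a frame */
            set_underrun_reporting(1);  /* real underruns are errors again */
    }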
4939 4936
4940/* IPS only exists on ULT machines and is tied to pipe A. */ 4937/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4952,11 +4949,14 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4952 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 4949 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4953 struct intel_crtc_state *pipe_config = 4950 struct intel_crtc_state *pipe_config =
4954 to_intel_crtc_state(crtc->state); 4951 to_intel_crtc_state(crtc->state);
4955 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
4956 4952
4957 if (WARN_ON(intel_crtc->active)) 4953 if (WARN_ON(intel_crtc->active))
4958 return; 4954 return;
4959 4955
4956 if (intel_crtc->config->has_pch_encoder)
4957 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4958 false);
4959
4960 if (intel_crtc_to_shared_dpll(intel_crtc)) 4960 if (intel_crtc_to_shared_dpll(intel_crtc))
4961 intel_enable_shared_dpll(intel_crtc); 4961 intel_enable_shared_dpll(intel_crtc);
4962 4962
@@ -4981,21 +4981,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4981 4981
4982 intel_crtc->active = true; 4982 intel_crtc->active = true;
4983 4983
4984 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4984 if (intel_crtc->config->has_pch_encoder)
4985 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4986 else
4987 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4988
4985 for_each_encoder_on_crtc(dev, crtc, encoder) { 4989 for_each_encoder_on_crtc(dev, crtc, encoder) {
4986 if (encoder->pre_pll_enable)
4987 encoder->pre_pll_enable(encoder);
4988 if (encoder->pre_enable) 4990 if (encoder->pre_enable)
4989 encoder->pre_enable(encoder); 4991 encoder->pre_enable(encoder);
4990 } 4992 }
4991 4993
4992 if (intel_crtc->config->has_pch_encoder) { 4994 if (intel_crtc->config->has_pch_encoder)
4993 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4994 true);
4995 dev_priv->display.fdi_link_train(crtc); 4995 dev_priv->display.fdi_link_train(crtc);
4996 }
4997 4996
4998 if (!is_dsi) 4997 if (!intel_crtc->config->has_dsi_encoder)
4999 intel_ddi_enable_pipe_clock(intel_crtc); 4998 intel_ddi_enable_pipe_clock(intel_crtc);
5000 4999
5001 if (INTEL_INFO(dev)->gen >= 9) 5000 if (INTEL_INFO(dev)->gen >= 9)
@@ -5010,7 +5009,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
5010 intel_crtc_load_lut(crtc); 5009 intel_crtc_load_lut(crtc);
5011 5010
5012 intel_ddi_set_pipe_settings(crtc); 5011 intel_ddi_set_pipe_settings(crtc);
5013 if (!is_dsi) 5012 if (!intel_crtc->config->has_dsi_encoder)
5014 intel_ddi_enable_transcoder_func(crtc); 5013 intel_ddi_enable_transcoder_func(crtc);
5015 5014
5016 intel_update_watermarks(crtc); 5015 intel_update_watermarks(crtc);
@@ -5019,7 +5018,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
5019 if (intel_crtc->config->has_pch_encoder) 5018 if (intel_crtc->config->has_pch_encoder)
5020 lpt_pch_enable(crtc); 5019 lpt_pch_enable(crtc);
5021 5020
5022 if (intel_crtc->config->dp_encoder_is_mst && !is_dsi) 5021 if (intel_crtc->config->dp_encoder_is_mst)
5023 intel_ddi_set_vc_payload_alloc(crtc, true); 5022 intel_ddi_set_vc_payload_alloc(crtc, true);
5024 5023
5025 assert_vblank_disabled(crtc); 5024 assert_vblank_disabled(crtc);
@@ -5030,6 +5029,14 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
5030 intel_opregion_notify_encoder(encoder, true); 5029 intel_opregion_notify_encoder(encoder, true);
5031 } 5030 }
5032 5031
5032 if (intel_crtc->config->has_pch_encoder) {
5033 intel_wait_for_vblank(dev, pipe);
5034 intel_wait_for_vblank(dev, pipe);
5035 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5036 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5037 true);
5038 }
5039
5033 /* If we change the relative order between pipe/planes enabling, we need 5040 /* If we change the relative order between pipe/planes enabling, we need
5034 * to change the workaround. */ 5041 * to change the workaround. */
5035 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 5042 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
@@ -5037,6 +5044,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
5037 intel_wait_for_vblank(dev, hsw_workaround_pipe); 5044 intel_wait_for_vblank(dev, hsw_workaround_pipe);
5038 intel_wait_for_vblank(dev, hsw_workaround_pipe); 5045 intel_wait_for_vblank(dev, hsw_workaround_pipe);
5039 } 5046 }
5047
5048 intel_fbc_enable(intel_crtc);
5040} 5049}
5041 5050
5042static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) 5051static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@@ -5061,7 +5070,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5061 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5070 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5062 struct intel_encoder *encoder; 5071 struct intel_encoder *encoder;
5063 int pipe = intel_crtc->pipe; 5072 int pipe = intel_crtc->pipe;
5064 u32 reg, temp; 5073
5074 if (intel_crtc->config->has_pch_encoder)
5075 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5065 5076
5066 for_each_encoder_on_crtc(dev, crtc, encoder) 5077 for_each_encoder_on_crtc(dev, crtc, encoder)
5067 encoder->disable(encoder); 5078 encoder->disable(encoder);
@@ -5069,15 +5080,22 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5069 drm_crtc_vblank_off(crtc); 5080 drm_crtc_vblank_off(crtc);
5070 assert_vblank_disabled(crtc); 5081 assert_vblank_disabled(crtc);
5071 5082
5083 /*
5084 * Sometimes spurious CPU pipe underruns happen when the
5085 * pipe is already disabled, but FDI RX/TX is still enabled.
5086 * Happens at least with VGA+HDMI cloning. Suppress them.
5087 */
5072 if (intel_crtc->config->has_pch_encoder) 5088 if (intel_crtc->config->has_pch_encoder)
5073 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5089 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5074 5090
5075 intel_disable_pipe(intel_crtc); 5091 intel_disable_pipe(intel_crtc);
5076 5092
5077 ironlake_pfit_disable(intel_crtc, false); 5093 ironlake_pfit_disable(intel_crtc, false);
5078 5094
5079 if (intel_crtc->config->has_pch_encoder) 5095 if (intel_crtc->config->has_pch_encoder) {
5080 ironlake_fdi_disable(crtc); 5096 ironlake_fdi_disable(crtc);
5097 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5098 }
5081 5099
5082 for_each_encoder_on_crtc(dev, crtc, encoder) 5100 for_each_encoder_on_crtc(dev, crtc, encoder)
5083 if (encoder->post_disable) 5101 if (encoder->post_disable)
@@ -5087,6 +5105,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5087 ironlake_disable_pch_transcoder(dev_priv, pipe); 5105 ironlake_disable_pch_transcoder(dev_priv, pipe);
5088 5106
5089 if (HAS_PCH_CPT(dev)) { 5107 if (HAS_PCH_CPT(dev)) {
5108 i915_reg_t reg;
5109 u32 temp;
5110
5090 /* disable TRANS_DP_CTL */ 5111 /* disable TRANS_DP_CTL */
5091 reg = TRANS_DP_CTL(pipe); 5112 reg = TRANS_DP_CTL(pipe);
5092 temp = I915_READ(reg); 5113 temp = I915_READ(reg);
@@ -5103,6 +5124,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5103 5124
5104 ironlake_fdi_pll_disable(intel_crtc); 5125 ironlake_fdi_pll_disable(intel_crtc);
5105 } 5126 }
5127
5128 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5129
5130 intel_fbc_disable_crtc(intel_crtc);
5106} 5131}
5107 5132
5108static void haswell_crtc_disable(struct drm_crtc *crtc) 5133static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5112,7 +5137,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5113 struct intel_encoder *encoder; 5138 struct intel_encoder *encoder;
5114 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5139 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5115 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 5140
5141 if (intel_crtc->config->has_pch_encoder)
5142 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5143 false);
5116 5144
5117 for_each_encoder_on_crtc(dev, crtc, encoder) { 5145 for_each_encoder_on_crtc(dev, crtc, encoder) {
5118 intel_opregion_notify_encoder(encoder, false); 5146 intel_opregion_notify_encoder(encoder, false);
@@ -5122,15 +5150,12 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5122 drm_crtc_vblank_off(crtc); 5150 drm_crtc_vblank_off(crtc);
5123 assert_vblank_disabled(crtc); 5151 assert_vblank_disabled(crtc);
5124 5152
5125 if (intel_crtc->config->has_pch_encoder)
5126 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5127 false);
5128 intel_disable_pipe(intel_crtc); 5153 intel_disable_pipe(intel_crtc);
5129 5154
5130 if (intel_crtc->config->dp_encoder_is_mst) 5155 if (intel_crtc->config->dp_encoder_is_mst)
5131 intel_ddi_set_vc_payload_alloc(crtc, false); 5156 intel_ddi_set_vc_payload_alloc(crtc, false);
5132 5157
5133 if (!is_dsi) 5158 if (!intel_crtc->config->has_dsi_encoder)
5134 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5159 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5135 5160
5136 if (INTEL_INFO(dev)->gen >= 9) 5161 if (INTEL_INFO(dev)->gen >= 9)
@@ -5138,7 +5163,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5138 else 5163 else
5139 ironlake_pfit_disable(intel_crtc, false); 5164 ironlake_pfit_disable(intel_crtc, false);
5140 5165
5141 if (!is_dsi) 5166 if (!intel_crtc->config->has_dsi_encoder)
5142 intel_ddi_disable_pipe_clock(intel_crtc); 5167 intel_ddi_disable_pipe_clock(intel_crtc);
5143 5168
5144 if (intel_crtc->config->has_pch_encoder) { 5169 if (intel_crtc->config->has_pch_encoder) {
@@ -5149,6 +5174,12 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5149 for_each_encoder_on_crtc(dev, crtc, encoder) 5174 for_each_encoder_on_crtc(dev, crtc, encoder)
5150 if (encoder->post_disable) 5175 if (encoder->post_disable)
5151 encoder->post_disable(encoder); 5176 encoder->post_disable(encoder);
5177
5178 if (intel_crtc->config->has_pch_encoder)
5179 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5180 true);
5181
5182 intel_fbc_disable_crtc(intel_crtc);
5152} 5183}
5153 5184
5154static void i9xx_pfit_enable(struct intel_crtc *crtc) 5185static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5179,24 +5210,40 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
5179{ 5210{
5180 switch (port) { 5211 switch (port) {
5181 case PORT_A: 5212 case PORT_A:
5182 return POWER_DOMAIN_PORT_DDI_A_4_LANES; 5213 return POWER_DOMAIN_PORT_DDI_A_LANES;
5183 case PORT_B: 5214 case PORT_B:
5184 return POWER_DOMAIN_PORT_DDI_B_4_LANES; 5215 return POWER_DOMAIN_PORT_DDI_B_LANES;
5185 case PORT_C: 5216 case PORT_C:
5186 return POWER_DOMAIN_PORT_DDI_C_4_LANES; 5217 return POWER_DOMAIN_PORT_DDI_C_LANES;
5187 case PORT_D: 5218 case PORT_D:
5188 return POWER_DOMAIN_PORT_DDI_D_4_LANES; 5219 return POWER_DOMAIN_PORT_DDI_D_LANES;
5189 case PORT_E: 5220 case PORT_E:
5190 return POWER_DOMAIN_PORT_DDI_E_2_LANES; 5221 return POWER_DOMAIN_PORT_DDI_E_LANES;
5191 default: 5222 default:
5192 WARN_ON_ONCE(1); 5223 MISSING_CASE(port);
5193 return POWER_DOMAIN_PORT_OTHER; 5224 return POWER_DOMAIN_PORT_OTHER;
5194 } 5225 }
5195} 5226}
5196 5227
5197#define for_each_power_domain(domain, mask) \ 5228static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5198 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 5229{
5199 if ((1 << (domain)) & (mask)) 5230 switch (port) {
5231 case PORT_A:
5232 return POWER_DOMAIN_AUX_A;
5233 case PORT_B:
5234 return POWER_DOMAIN_AUX_B;
5235 case PORT_C:
5236 return POWER_DOMAIN_AUX_C;
5237 case PORT_D:
5238 return POWER_DOMAIN_AUX_D;
5239 case PORT_E:
5240 /* FIXME: Check VBT for actual wiring of PORT E */
5241 return POWER_DOMAIN_AUX_D;
5242 default:
5243 MISSING_CASE(port);
5244 return POWER_DOMAIN_AUX_A;
5245 }
5246}
5200 5247
5201enum intel_display_power_domain 5248enum intel_display_power_domain
5202intel_display_port_power_domain(struct intel_encoder *intel_encoder) 5249intel_display_port_power_domain(struct intel_encoder *intel_encoder)
@@ -5225,6 +5272,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5225 } 5272 }
5226} 5273}
5227 5274
5275enum intel_display_power_domain
5276intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5277{
5278 struct drm_device *dev = intel_encoder->base.dev;
5279 struct intel_digital_port *intel_dig_port;
5280
5281 switch (intel_encoder->type) {
5282 case INTEL_OUTPUT_UNKNOWN:
5283 case INTEL_OUTPUT_HDMI:
5284 /*
5285 * Only DDI platforms should ever use these output types.
5286 * We can get here after the HDMI detect code has already set
5287 * the type of the shared encoder. Since we can't be sure
5288 * of the status of the given connectors, play it safe and
5289 * run the DP detection too.
5290 */
5291 WARN_ON_ONCE(!HAS_DDI(dev));
5292 case INTEL_OUTPUT_DISPLAYPORT:
5293 case INTEL_OUTPUT_EDP:
5294 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5295 return port_to_aux_power_domain(intel_dig_port->port);
5296 case INTEL_OUTPUT_DP_MST:
5297 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5298 return port_to_aux_power_domain(intel_dig_port->port);
5299 default:
5300 MISSING_CASE(intel_encoder->type);
5301 return POWER_DOMAIN_AUX_A;
5302 }
5303}
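Together, the two new helpers let AUX transfer code grab only the AUX power well for the right port instead of a wider display domain. A hypothetical usage sketch under that assumption, with the power get/put and the transfer stubbed:

    enum aux_domain { AUX_A, AUX_B, AUX_C, AUX_D };

    static void power_get(enum aux_domain d) { (void)d; }
    static void power_put(enum aux_domain d) { (void)d; }
    static int do_transfer(void) { return 0; }

    static int aux_transfer(enum aux_domain d)
    {
            int ret;

            power_get(d);           /* only the port's AUX well, nothing wider */
            ret = do_transfer();
            power_put(d);

            return ret;
    }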
5304
5228static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) 5305static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5229{ 5306{
5230 struct drm_device *dev = crtc->dev; 5307 struct drm_device *dev = crtc->dev;
@@ -5232,13 +5309,11 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5232 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5309 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5233 enum pipe pipe = intel_crtc->pipe; 5310 enum pipe pipe = intel_crtc->pipe;
5234 unsigned long mask; 5311 unsigned long mask;
5235 enum transcoder transcoder; 5312 enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
5236 5313
5237 if (!crtc->state->active) 5314 if (!crtc->state->active)
5238 return 0; 5315 return 0;
5239 5316
5240 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
5241
5242 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5317 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5243 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5318 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5244 if (intel_crtc->config->pch_pfit.enabled || 5319 if (intel_crtc->config->pch_pfit.enabled ||
@@ -5325,7 +5400,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
5325{ 5400{
5326 struct drm_i915_private *dev_priv = dev->dev_private; 5401 struct drm_i915_private *dev_priv = dev->dev_private;
5327 5402
5328 if (IS_SKYLAKE(dev)) { 5403 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5329 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 5404 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5330 5405
5331 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 5406 if (limit == SKL_DFSM_CDCLK_LIMIT_675)
@@ -5742,32 +5817,16 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5742 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5817 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5743 DRM_ERROR("DBuf power disable timeout\n"); 5818 DRM_ERROR("DBuf power disable timeout\n");
5744 5819
5745 /* 5820 /* disable DPLL0 */
5746 * DMC assumes ownership of LCPLL and will get confused if we touch it. 5821 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5747 */ 5822 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5748 if (dev_priv->csr.dmc_payload) { 5823 DRM_ERROR("Couldn't disable DPLL0\n");
5749 /* disable DPLL0 */
5750 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
5751 ~LCPLL_PLL_ENABLE);
5752 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5753 DRM_ERROR("Couldn't disable DPLL0\n");
5754 }
5755
5756 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5757} 5824}
5758 5825
5759void skl_init_cdclk(struct drm_i915_private *dev_priv) 5826void skl_init_cdclk(struct drm_i915_private *dev_priv)
5760{ 5827{
5761 u32 val;
5762 unsigned int required_vco; 5828 unsigned int required_vco;
5763 5829
5764 /* enable PCH reset handshake */
5765 val = I915_READ(HSW_NDE_RSTWRN_OPT);
5766 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
5767
5768 /* enable PG1 and Misc I/O */
5769 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5770
5771 /* DPLL0 not enabled (happens on early BIOS versions) */ 5830 /* DPLL0 not enabled (happens on early BIOS versions) */
5772 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { 5831 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5773 /* enable DPLL0 */ 5832 /* enable DPLL0 */
@@ -5788,6 +5847,45 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
5788 DRM_ERROR("DBuf power enable timeout\n"); 5847 DRM_ERROR("DBuf power enable timeout\n");
5789} 5848}
5790 5849
5850int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5851{
5852 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5853 uint32_t cdctl = I915_READ(CDCLK_CTL);
5854 int freq = dev_priv->skl_boot_cdclk;
5855
5856 /*
5857 * check if the pre-os intialized the display
5858 * There is SWF18 scratchpad register defined which is set by the
5859 * pre-os which can be used by the OS drivers to check the status
5860 */
5861 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5862 goto sanitize;
5863
5864 /* Is PLL enabled and locked? */
5865 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5866 goto sanitize;
5867
5868 /* DPLL okay; verify the cdclk.
5869 *
5870 * In some instances the frequency selection is correct but the
5871 * decimal part is programmed wrong by the BIOS when the pre-OS
5872 * does not enable the display. Verify that as well.
5873 */
5874 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5875 /* All well; nothing to sanitize */
5876 return false;
5877sanitize:
5878 /*
5879 * For now, initialize with the max cdclk until
5880 * we get dynamic cdclk support.
5881 */
5882 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5883 skl_init_cdclk(dev_priv);
5884
5885 /* we did have to sanitize */
5886 return true;
5887}
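skl_sanitize_cdclk() boils down to: trust the BIOS-programmed cdclk only if the scratchpad says the pre-OS initialized the display, DPLL0 is enabled and locked, and the programmed value (including the decimal field) matches the expected one; otherwise reprogram. A standalone sketch of that decision, with the register reads passed in rather than performed:

    #include <stdbool.h>
    #include <stdint.h>

    static bool cdclk_is_sane(uint32_t swf18, bool dpll0_locked,
                              uint32_t cdctl, uint32_t expected)
    {
            if ((swf18 & 0x00FFFFFF) == 0)
                    return false;           /* pre-OS never touched the display */
            if (!dpll0_locked)
                    return false;           /* DPLL0 off or not locked */
            return cdctl == expected;       /* freq select and decimal both match */
    }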
5888
5791/* Adjust CDclk dividers to allow high res or save power if possible */ 5889/* Adjust CDclk dividers to allow high res or save power if possible */
5792static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5890static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5793{ 5891{
@@ -6069,13 +6167,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
6069 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6167 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6070 struct intel_encoder *encoder; 6168 struct intel_encoder *encoder;
6071 int pipe = intel_crtc->pipe; 6169 int pipe = intel_crtc->pipe;
6072 bool is_dsi;
6073 6170
6074 if (WARN_ON(intel_crtc->active)) 6171 if (WARN_ON(intel_crtc->active))
6075 return; 6172 return;
6076 6173
6077 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
6078
6079 if (intel_crtc->config->has_dp_encoder) 6174 if (intel_crtc->config->has_dp_encoder)
6080 intel_dp_set_m_n(intel_crtc, M1_N1); 6175 intel_dp_set_m_n(intel_crtc, M1_N1);
6081 6176
@@ -6098,7 +6193,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
6098 if (encoder->pre_pll_enable) 6193 if (encoder->pre_pll_enable)
6099 encoder->pre_pll_enable(encoder); 6194 encoder->pre_pll_enable(encoder);
6100 6195
6101 if (!is_dsi) { 6196 if (!intel_crtc->config->has_dsi_encoder) {
6102 if (IS_CHERRYVIEW(dev)) { 6197 if (IS_CHERRYVIEW(dev)) {
6103 chv_prepare_pll(intel_crtc, intel_crtc->config); 6198 chv_prepare_pll(intel_crtc, intel_crtc->config);
6104 chv_enable_pll(intel_crtc, intel_crtc->config); 6199 chv_enable_pll(intel_crtc, intel_crtc->config);
@@ -6177,6 +6272,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
6177 6272
6178 for_each_encoder_on_crtc(dev, crtc, encoder) 6273 for_each_encoder_on_crtc(dev, crtc, encoder)
6179 encoder->enable(encoder); 6274 encoder->enable(encoder);
6275
6276 intel_fbc_enable(intel_crtc);
6180} 6277}
6181 6278
6182static void i9xx_pfit_disable(struct intel_crtc *crtc) 6279static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6224,7 +6321,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
6224 if (encoder->post_disable) 6321 if (encoder->post_disable)
6225 encoder->post_disable(encoder); 6322 encoder->post_disable(encoder);
6226 6323
6227 if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) { 6324 if (!intel_crtc->config->has_dsi_encoder) {
6228 if (IS_CHERRYVIEW(dev)) 6325 if (IS_CHERRYVIEW(dev))
6229 chv_disable_pll(dev_priv, pipe); 6326 chv_disable_pll(dev_priv, pipe);
6230 else if (IS_VALLEYVIEW(dev)) 6327 else if (IS_VALLEYVIEW(dev))
@@ -6239,6 +6336,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
6239 6336
6240 if (!IS_GEN2(dev)) 6337 if (!IS_GEN2(dev))
6241 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6338 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6339
6340 intel_fbc_disable_crtc(intel_crtc);
6242} 6341}
6243 6342
6244static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 6343static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -6252,7 +6351,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6252 return; 6351 return;
6253 6352
6254 if (to_intel_plane_state(crtc->primary->state)->visible) { 6353 if (to_intel_plane_state(crtc->primary->state)->visible) {
6255 intel_crtc_wait_for_pending_flips(crtc); 6354 WARN_ON(intel_crtc->unpin_work);
6355
6256 intel_pre_disable_primary(crtc); 6356 intel_pre_disable_primary(crtc);
6257 } 6357 }
6258 6358
@@ -6570,6 +6670,15 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
6570 pipe_config_supports_ips(dev_priv, pipe_config); 6670 pipe_config_supports_ips(dev_priv, pipe_config);
6571} 6671}
6572 6672
6673static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6674{
6675 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6676
6677 /* GDG double wide on either pipe, otherwise pipe A only */
6678 return INTEL_INFO(dev_priv)->gen < 4 &&
6679 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6680}
6681
6573static int intel_crtc_compute_config(struct intel_crtc *crtc, 6682static int intel_crtc_compute_config(struct intel_crtc *crtc,
6574 struct intel_crtc_state *pipe_config) 6683 struct intel_crtc_state *pipe_config)
6575{ 6684{
@@ -6579,23 +6688,24 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6579 6688
6580 /* FIXME should check pixel clock limits on all platforms */ 6689 /* FIXME should check pixel clock limits on all platforms */
6581 if (INTEL_INFO(dev)->gen < 4) { 6690 if (INTEL_INFO(dev)->gen < 4) {
6582 int clock_limit = dev_priv->max_cdclk_freq; 6691 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6583 6692
6584 /* 6693 /*
6585 * Enable pixel doubling when the dot clock 6694 * Enable double wide mode when the dot clock
6586 * is > 90% of the (display) core speed. 6695 * is > 90% of the (display) core speed.
6587 *
6588 * GDG double wide on either pipe,
6589 * otherwise pipe A only.
6590 */ 6696 */
6591 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && 6697 if (intel_crtc_supports_double_wide(crtc) &&
6592 adjusted_mode->crtc_clock > clock_limit * 9 / 10) { 6698 adjusted_mode->crtc_clock > clock_limit) {
6593 clock_limit *= 2; 6699 clock_limit *= 2;
6594 pipe_config->double_wide = true; 6700 pipe_config->double_wide = true;
6595 } 6701 }
6596 6702
6597 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10) 6703 if (adjusted_mode->crtc_clock > clock_limit) {
6704 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6705 adjusted_mode->crtc_clock, clock_limit,
6706 yesno(pipe_config->double_wide));
6598 return -EINVAL; 6707 return -EINVAL;
6708 }
6599 } 6709 }
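Worked through with illustrative numbers: if max_cdclk_freq is 400000 kHz, the limit is 360000 kHz; a 380 MHz dot clock on a double-wide-capable pipe doubles the limit to 720000 kHz and passes, while the same clock without double wide is rejected. A runnable sketch of the check:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int check_clock(int crtc_clock, int max_cdclk, bool can_double_wide)
    {
            int limit = max_cdclk * 9 / 10;         /* 90% of core speed */

            if (can_double_wide && crtc_clock > limit)
                    limit *= 2;                     /* double wide mode */

            return crtc_clock > limit ? -EINVAL : 0;
    }

    int main(void)
    {
            /* 380 MHz dot clock vs. 400 MHz cdclk: limit is 360000 kHz */
            printf("%d\n", check_clock(380000, 400000, true));   /* 0 */
            printf("%d\n", check_clock(380000, 400000, false));  /* -EINVAL */
            return 0;
    }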
6600 6710
6601 /* 6711 /*
@@ -7360,7 +7470,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
7360 struct drm_device *dev = crtc->base.dev; 7470 struct drm_device *dev = crtc->base.dev;
7361 struct drm_i915_private *dev_priv = dev->dev_private; 7471 struct drm_i915_private *dev_priv = dev->dev_private;
7362 int pipe = crtc->pipe; 7472 int pipe = crtc->pipe;
7363 int dpll_reg = DPLL(crtc->pipe); 7473 i915_reg_t dpll_reg = DPLL(crtc->pipe);
7364 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7474 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7365 u32 loopfilter, tribuf_calcntr; 7475 u32 loopfilter, tribuf_calcntr;
7366 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7476 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
@@ -7826,8 +7936,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7826 int refclk, num_connectors = 0; 7936 int refclk, num_connectors = 0;
7827 intel_clock_t clock; 7937 intel_clock_t clock;
7828 bool ok; 7938 bool ok;
7829 bool is_dsi = false;
7830 struct intel_encoder *encoder;
7831 const intel_limit_t *limit; 7939 const intel_limit_t *limit;
7832 struct drm_atomic_state *state = crtc_state->base.state; 7940 struct drm_atomic_state *state = crtc_state->base.state;
7833 struct drm_connector *connector; 7941 struct drm_connector *connector;
@@ -7837,26 +7945,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7837 memset(&crtc_state->dpll_hw_state, 0, 7945 memset(&crtc_state->dpll_hw_state, 0,
7838 sizeof(crtc_state->dpll_hw_state)); 7946 sizeof(crtc_state->dpll_hw_state));
7839 7947
7840 for_each_connector_in_state(state, connector, connector_state, i) { 7948 if (crtc_state->has_dsi_encoder)
7841 if (connector_state->crtc != &crtc->base) 7949 return 0;
7842 continue;
7843
7844 encoder = to_intel_encoder(connector_state->best_encoder);
7845
7846 switch (encoder->type) {
7847 case INTEL_OUTPUT_DSI:
7848 is_dsi = true;
7849 break;
7850 default:
7851 break;
7852 }
7853 7950
7854 num_connectors++; 7951 for_each_connector_in_state(state, connector, connector_state, i) {
7952 if (connector_state->crtc == &crtc->base)
7953 num_connectors++;
7855 } 7954 }
7856 7955
7857 if (is_dsi)
7858 return 0;
7859
7860 if (!crtc_state->clock_set) { 7956 if (!crtc_state->clock_set) {
7861 refclk = i9xx_get_refclk(crtc_state, num_connectors); 7957 refclk = i9xx_get_refclk(crtc_state, num_connectors);
7862 7958
@@ -8849,7 +8945,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8849 memset(&crtc_state->dpll_hw_state, 0, 8945 memset(&crtc_state->dpll_hw_state, 0,
8850 sizeof(crtc_state->dpll_hw_state)); 8946 sizeof(crtc_state->dpll_hw_state));
8851 8947
8852 is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS); 8948 is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
8853 8949
8854 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 8950 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
8855 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 8951 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
@@ -9278,8 +9374,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9278 9374
9279 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 9375 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9280 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 9376 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9281 I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 9377 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9282 I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 9378 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9283 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 9379 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9284 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 9380 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9285 "CPU PWM1 enabled\n"); 9381 "CPU PWM1 enabled\n");
@@ -9623,14 +9719,10 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9623 else 9719 else
9624 cdclk = 337500; 9720 cdclk = 337500;
9625 9721
9626 /*
9627 * FIXME move the cdclk calculation to
9628 * compute_config() so we can fail gracefully.
9629 */
9630 if (cdclk > dev_priv->max_cdclk_freq) { 9722 if (cdclk > dev_priv->max_cdclk_freq) {
9631 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9723 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9632 cdclk, dev_priv->max_cdclk_freq); 9724 cdclk, dev_priv->max_cdclk_freq);
9633 cdclk = dev_priv->max_cdclk_freq; 9725 return -EINVAL;
9634 } 9726 }
9635 9727
9636 to_intel_atomic_state(state)->cdclk = cdclk; 9728 to_intel_atomic_state(state)->cdclk = cdclk;
@@ -9723,6 +9815,9 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9723 case PORT_CLK_SEL_WRPLL2: 9815 case PORT_CLK_SEL_WRPLL2:
9724 pipe_config->shared_dpll = DPLL_ID_WRPLL2; 9816 pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9725 break; 9817 break;
9818 case PORT_CLK_SEL_SPLL:
9819 pipe_config->shared_dpll = DPLL_ID_SPLL;
9820 break;
9726 } 9821 }
9727} 9822}
9728 9823
@@ -9739,7 +9834,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9739 9834
9740 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9835 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9741 9836
9742 if (IS_SKYLAKE(dev)) 9837 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9743 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9838 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9744 else if (IS_BROXTON(dev)) 9839 else if (IS_BROXTON(dev))
9745 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9840 bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10085,20 +10180,17 @@ __intel_framebuffer_create(struct drm_device *dev,
10085 int ret; 10180 int ret;
10086 10181
10087 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10182 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10088 if (!intel_fb) { 10183 if (!intel_fb)
10089 drm_gem_object_unreference(&obj->base);
10090 return ERR_PTR(-ENOMEM); 10184 return ERR_PTR(-ENOMEM);
10091 }
10092 10185
10093 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 10186 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10094 if (ret) 10187 if (ret)
10095 goto err; 10188 goto err;
10096 10189
10097 return &intel_fb->base; 10190 return &intel_fb->base;
10191
10098err: 10192err:
10099 drm_gem_object_unreference(&obj->base);
10100 kfree(intel_fb); 10193 kfree(intel_fb);
10101
10102 return ERR_PTR(ret); 10194 return ERR_PTR(ret);
10103} 10195}
10104 10196
@@ -10138,6 +10230,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10138 struct drm_display_mode *mode, 10230 struct drm_display_mode *mode,
10139 int depth, int bpp) 10231 int depth, int bpp)
10140{ 10232{
10233 struct drm_framebuffer *fb;
10141 struct drm_i915_gem_object *obj; 10234 struct drm_i915_gem_object *obj;
10142 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10235 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10143 10236
@@ -10152,7 +10245,11 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10152 bpp); 10245 bpp);
10153 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 10246 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10154 10247
10155 return intel_framebuffer_create(dev, &mode_cmd, obj); 10248 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10249 if (IS_ERR(fb))
10250 drm_gem_object_unreference_unlocked(&obj->base);
10251
10252 return fb;
10156} 10253}
10157 10254
10158static struct drm_framebuffer * 10255static struct drm_framebuffer *
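The two framebuffer-creation hunks above shift GEM reference ownership to the callers: __intel_framebuffer_create() no longer drops the object reference on failure, and intel_framebuffer_create_for_mode() (like intel_user_framebuffer_create() later in this diff) now unreferences the object itself when creation returns an error. A toy sketch of that ownership rule, with hypothetical names:

    /* Sketch of the ownership rule: the code that takes a reference drops
     * it on failure; helpers do not touch references they did not take.
     * All names here are hypothetical. */
    #include <stdio.h>

    struct object { int refcount; };

    static void object_get(struct object *o) { o->refcount++; }
    static void object_put(struct object *o) { o->refcount--; }

    /* Helper: may fail, but never releases the caller's reference. */
    static int helper_create(struct object *o, int fail)
    {
        (void)o;
        if (fail)
            return -1;      /* caller still owns its reference */
        return 0;
    }

    int main(void)
    {
        struct object obj = { .refcount = 0 };

        object_get(&obj);                   /* caller takes a reference */
        if (helper_create(&obj, 1) < 0)
            object_put(&obj);               /* caller cleans up */

        printf("refcount=%d\n", obj.refcount);  /* balanced: 0 */
        return 0;
    }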
@@ -11055,7 +11152,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11055 */ 11152 */
11056 if (ring->id == RCS) { 11153 if (ring->id == RCS) {
11057 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 11154 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
11058 intel_ring_emit(ring, DERRMR); 11155 intel_ring_emit_reg(ring, DERRMR);
11059 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 11156 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11060 DERRMR_PIPEB_PRI_FLIP_DONE | 11157 DERRMR_PIPEB_PRI_FLIP_DONE |
11061 DERRMR_PIPEC_PRI_FLIP_DONE)); 11158 DERRMR_PIPEC_PRI_FLIP_DONE));
@@ -11065,7 +11162,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11065 else 11162 else
11066 intel_ring_emit(ring, MI_STORE_REGISTER_MEM | 11163 intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11067 MI_SRM_LRM_GLOBAL_GTT); 11164 MI_SRM_LRM_GLOBAL_GTT);
11068 intel_ring_emit(ring, DERRMR); 11165 intel_ring_emit_reg(ring, DERRMR);
11069 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 11166 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
11070 if (IS_GEN8(dev)) { 11167 if (IS_GEN8(dev)) {
11071 intel_ring_emit(ring, 0); 11168 intel_ring_emit(ring, 0);
@@ -11105,18 +11202,23 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
11105 return true; 11202 return true;
11106 else if (i915.enable_execlists) 11203 else if (i915.enable_execlists)
11107 return true; 11204 return true;
11205 else if (obj->base.dma_buf &&
11206 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11207 false))
11208 return true;
11108 else 11209 else
11109 return ring != i915_gem_request_get_ring(obj->last_write_req); 11210 return ring != i915_gem_request_get_ring(obj->last_write_req);
11110} 11211}
11111 11212
11112static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11213static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11214 unsigned int rotation,
11113 struct intel_unpin_work *work) 11215 struct intel_unpin_work *work)
11114{ 11216{
11115 struct drm_device *dev = intel_crtc->base.dev; 11217 struct drm_device *dev = intel_crtc->base.dev;
11116 struct drm_i915_private *dev_priv = dev->dev_private; 11218 struct drm_i915_private *dev_priv = dev->dev_private;
11117 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 11219 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11118 const enum pipe pipe = intel_crtc->pipe; 11220 const enum pipe pipe = intel_crtc->pipe;
11119 u32 ctl, stride; 11221 u32 ctl, stride, tile_height;
11120 11222
11121 ctl = I915_READ(PLANE_CTL(pipe, 0)); 11223 ctl = I915_READ(PLANE_CTL(pipe, 0));
11122 ctl &= ~PLANE_CTL_TILED_MASK; 11224 ctl &= ~PLANE_CTL_TILED_MASK;
@@ -11140,9 +11242,16 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11140 * The stride is either expressed as a multiple of 64 bytes chunks for 11242 * The stride is either expressed as a multiple of 64 bytes chunks for
11141 * linear buffers or in number of tiles for tiled buffers. 11243 * linear buffers or in number of tiles for tiled buffers.
11142 */ 11244 */
11143 stride = fb->pitches[0] / 11245 if (intel_rotation_90_or_270(rotation)) {
11144 intel_fb_stride_alignment(dev, fb->modifier[0], 11246 /* stride = Surface height in tiles */
11145 fb->pixel_format); 11247 tile_height = intel_tile_height(dev, fb->pixel_format,
11248 fb->modifier[0], 0);
11249 stride = DIV_ROUND_UP(fb->height, tile_height);
11250 } else {
11251 stride = fb->pitches[0] /
11252 intel_fb_stride_alignment(dev, fb->modifier[0],
11253 fb->pixel_format);
11254 }
11146 11255
11147 /* 11256 /*
11148 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on 11257 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
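For 90/270 degree rotation, the skl_do_mmio_flip() hunk above programs the plane stride as the surface height in tiles, DIV_ROUND_UP(fb->height, tile_height), instead of the pitch divided by the tile width. Illustrative arithmetic for an assumed 1920x1080 Y-tiled framebuffer with 128-byte by 32-row tiles:

    /* Illustrative arithmetic only: 1920x1080, 4 bytes per pixel,
     * 128-byte x 32-row tiles are assumed example parameters. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int width = 1920, height = 1080;
        unsigned int pitch = width * 4;     /* bytes per row */
        unsigned int tile_width_bytes = 128, tile_height = 32;

        /* 0/180 degrees: stride in tiles along the pitch */
        unsigned int stride = pitch / tile_width_bytes;
        printf("unrotated stride: %u tiles\n", stride);    /* 60 */

        /* 90/270 degrees: stride becomes surface height in tiles */
        stride = DIV_ROUND_UP(height, tile_height);
        printf("rotated stride:   %u tiles\n", stride);    /* 34 */
        return 0;
    }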
@@ -11163,10 +11272,9 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11163 struct intel_framebuffer *intel_fb = 11272 struct intel_framebuffer *intel_fb =
11164 to_intel_framebuffer(intel_crtc->base.primary->fb); 11273 to_intel_framebuffer(intel_crtc->base.primary->fb);
11165 struct drm_i915_gem_object *obj = intel_fb->obj; 11274 struct drm_i915_gem_object *obj = intel_fb->obj;
11275 i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11166 u32 dspcntr; 11276 u32 dspcntr;
11167 u32 reg;
11168 11277
11169 reg = DSPCNTR(intel_crtc->plane);
11170 dspcntr = I915_READ(reg); 11278 dspcntr = I915_READ(reg);
11171 11279
11172 if (obj->tiling_mode != I915_TILING_NONE) 11280 if (obj->tiling_mode != I915_TILING_NONE)
@@ -11200,7 +11308,7 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11200 intel_pipe_update_start(crtc); 11308 intel_pipe_update_start(crtc);
11201 11309
11202 if (INTEL_INFO(mmio_flip->i915)->gen >= 9) 11310 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11203 skl_do_mmio_flip(crtc, work); 11311 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11204 else 11312 else
11205 /* use_mmio_flip() restricts MMIO flips to ilk+ */ 11313
11206 ilk_do_mmio_flip(crtc, work); 11314 ilk_do_mmio_flip(crtc, work);
@@ -11212,6 +11320,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
11212{ 11320{
11213 struct intel_mmio_flip *mmio_flip = 11321 struct intel_mmio_flip *mmio_flip =
11214 container_of(work, struct intel_mmio_flip, work); 11322 container_of(work, struct intel_mmio_flip, work);
11323 struct intel_framebuffer *intel_fb =
11324 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11325 struct drm_i915_gem_object *obj = intel_fb->obj;
11215 11326
11216 if (mmio_flip->req) { 11327 if (mmio_flip->req) {
11217 WARN_ON(__i915_wait_request(mmio_flip->req, 11328 WARN_ON(__i915_wait_request(mmio_flip->req,
@@ -11221,16 +11332,19 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
11221 i915_gem_request_unreference__unlocked(mmio_flip->req); 11332 i915_gem_request_unreference__unlocked(mmio_flip->req);
11222 } 11333 }
11223 11334
11335 /* For framebuffer backed by dmabuf, wait for fence */
11336 if (obj->base.dma_buf)
11337 WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11338 false, false,
11339 MAX_SCHEDULE_TIMEOUT) < 0);
11340
11224 intel_do_mmio_flip(mmio_flip); 11341 intel_do_mmio_flip(mmio_flip);
11225 kfree(mmio_flip); 11342 kfree(mmio_flip);
11226} 11343}
11227 11344
11228static int intel_queue_mmio_flip(struct drm_device *dev, 11345static int intel_queue_mmio_flip(struct drm_device *dev,
11229 struct drm_crtc *crtc, 11346 struct drm_crtc *crtc,
11230 struct drm_framebuffer *fb, 11347 struct drm_i915_gem_object *obj)
11231 struct drm_i915_gem_object *obj,
11232 struct intel_engine_cs *ring,
11233 uint32_t flags)
11234{ 11348{
11235 struct intel_mmio_flip *mmio_flip; 11349 struct intel_mmio_flip *mmio_flip;
11236 11350
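Taken together, the preceding hunks teach the MMIO-flip path about dma-buf backed framebuffers: use_mmio_flip() forces an MMIO flip while the backing reservation object still has an unsignaled fence, the flip worker waits on that fence before touching registers, and intel_queue_mmio_flip() loses its now-unused fb, ring, and flags parameters. A minimal sketch of the decision logic, with each predicate stubbed out (the real checks are named in the comments):

    /* Minimal sketch of the flip-path choice; every flag below is a stub
     * standing in for the real check named in its comment. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool mmio_forced;                  /* module parameter override */
    static bool execlists = true;             /* i915.enable_execlists */
    static bool dmabuf_fence_pending;         /* reservation not yet signaled */
    static bool different_ring;               /* last write on another ring */

    static bool use_mmio_flip(void)
    {
        if (mmio_forced)
            return true;
        if (execlists)
            return true;
        if (dmabuf_fence_pending)
            return true;        /* the worker will wait on the fence */
        return different_ring;  /* avoid cross-ring CS flips */
    }

    int main(void)
    {
        printf("use mmio flip: %s\n", use_mmio_flip() ? "yes" : "no");
        return 0;
    }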
@@ -11241,6 +11355,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
11241 mmio_flip->i915 = to_i915(dev); 11355 mmio_flip->i915 = to_i915(dev);
11242 mmio_flip->req = i915_gem_request_reference(obj->last_write_req); 11356 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11243 mmio_flip->crtc = to_intel_crtc(crtc); 11357 mmio_flip->crtc = to_intel_crtc(crtc);
11358 mmio_flip->rotation = crtc->primary->state->rotation;
11244 11359
11245 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11360 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11246 schedule_work(&mmio_flip->work); 11361 schedule_work(&mmio_flip->work);
@@ -11446,9 +11561,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11446 * synchronisation, so all we want here is to pin the framebuffer 11561 * synchronisation, so all we want here is to pin the framebuffer
11447 * into the display plane and skip any waits. 11562 * into the display plane and skip any waits.
11448 */ 11563 */
11564 if (!mmio_flip) {
11565 ret = i915_gem_object_sync(obj, ring, &request);
11566 if (ret)
11567 goto cleanup_pending;
11568 }
11569
11449 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, 11570 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
11450 crtc->primary->state, 11571 crtc->primary->state);
11451 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
11452 if (ret) 11572 if (ret)
11453 goto cleanup_pending; 11573 goto cleanup_pending;
11454 11574
@@ -11457,8 +11577,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11457 work->gtt_offset += intel_crtc->dspaddr_offset; 11577 work->gtt_offset += intel_crtc->dspaddr_offset;
11458 11578
11459 if (mmio_flip) { 11579 if (mmio_flip) {
11460 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, 11580 ret = intel_queue_mmio_flip(dev, crtc, obj);
11461 page_flip_flags);
11462 if (ret) 11581 if (ret)
11463 goto cleanup_unpin; 11582 goto cleanup_unpin;
11464 11583
@@ -11489,7 +11608,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11489 to_intel_plane(primary)->frontbuffer_bit); 11608 to_intel_plane(primary)->frontbuffer_bit);
11490 mutex_unlock(&dev->struct_mutex); 11609 mutex_unlock(&dev->struct_mutex);
11491 11610
11492 intel_fbc_disable_crtc(intel_crtc); 11611 intel_fbc_deactivate(intel_crtc);
11493 intel_frontbuffer_flip_prepare(dev, 11612 intel_frontbuffer_flip_prepare(dev,
11494 to_intel_plane(primary)->frontbuffer_bit); 11613 to_intel_plane(primary)->frontbuffer_bit);
11495 11614
@@ -11572,18 +11691,32 @@ retry:
11572static bool intel_wm_need_update(struct drm_plane *plane, 11691static bool intel_wm_need_update(struct drm_plane *plane,
11573 struct drm_plane_state *state) 11692 struct drm_plane_state *state)
11574{ 11693{
11575 /* Update watermarks on tiling changes. */ 11694 struct intel_plane_state *new = to_intel_plane_state(state);
11695 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11696
11697 /* Update watermarks on tiling or size changes. */
11576 if (!plane->state->fb || !state->fb || 11698 if (!plane->state->fb || !state->fb ||
11577 plane->state->fb->modifier[0] != state->fb->modifier[0] || 11699 plane->state->fb->modifier[0] != state->fb->modifier[0] ||
11578 plane->state->rotation != state->rotation) 11700 plane->state->rotation != state->rotation ||
11579 return true; 11701 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11580 11702 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11581 if (plane->state->crtc_w != state->crtc_w) 11703 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11704 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11582 return true; 11705 return true;
11583 11706
11584 return false; 11707 return false;
11585} 11708}
11586 11709
11710static bool needs_scaling(struct intel_plane_state *state)
11711{
11712 int src_w = drm_rect_width(&state->src) >> 16;
11713 int src_h = drm_rect_height(&state->src) >> 16;
11714 int dst_w = drm_rect_width(&state->dst);
11715 int dst_h = drm_rect_height(&state->dst);
11716
11717 return (src_w != dst_w || src_h != dst_h);
11718}
11719
11587int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, 11720int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11588 struct drm_plane_state *plane_state) 11721 struct drm_plane_state *plane_state)
11589{ 11722{
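intel_wm_need_update() now also triggers on source or destination size changes, and the new needs_scaling() helper compares the 16.16 fixed-point source rectangle (hence the >> 16) with the integer destination rectangle. A standalone sketch of that comparison:

    /* Illustrative only: plane src coordinates are 16.16 fixed point,
     * dst coordinates are whole pixels, as in the diff above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    static int rect_width(const struct rect *r)  { return r->x2 - r->x1; }
    static int rect_height(const struct rect *r) { return r->y2 - r->y1; }

    static bool needs_scaling(const struct rect *src, const struct rect *dst)
    {
        int src_w = rect_width(src) >> 16;   /* drop the fractional part */
        int src_h = rect_height(src) >> 16;

        return src_w != rect_width(dst) || src_h != rect_height(dst);
    }

    int main(void)
    {
        struct rect src = { 0, 0, 1280 << 16, 720 << 16 };
        struct rect dst = { 0, 0, 1920, 1080 };  /* upscaled plane */

        printf("scaling: %s\n", needs_scaling(&src, &dst) ? "yes" : "no");
        return 0;
    }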
@@ -11599,7 +11732,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11599 bool mode_changed = needs_modeset(crtc_state); 11732 bool mode_changed = needs_modeset(crtc_state);
11600 bool was_crtc_enabled = crtc->state->active; 11733 bool was_crtc_enabled = crtc->state->active;
11601 bool is_crtc_enabled = crtc_state->active; 11734 bool is_crtc_enabled = crtc_state->active;
11602
11603 bool turn_off, turn_on, visible, was_visible; 11735 bool turn_off, turn_on, visible, was_visible;
11604 struct drm_framebuffer *fb = plane_state->fb; 11736 struct drm_framebuffer *fb = plane_state->fb;
11605 11737
@@ -11612,14 +11744,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11612 return ret; 11744 return ret;
11613 } 11745 }
11614 11746
11615 /*
11616 * Disabling a plane is always okay; we just need to update
11617 * fb tracking in a special way since cleanup_fb() won't
11618 * get called by the plane helpers.
11619 */
11620 if (old_plane_state->base.fb && !fb)
11621 intel_crtc->atomic.disabled_planes |= 1 << i;
11622
11623 was_visible = old_plane_state->visible; 11747 was_visible = old_plane_state->visible;
11624 visible = to_intel_plane_state(plane_state)->visible; 11748 visible = to_intel_plane_state(plane_state)->visible;
11625 11749
@@ -11669,7 +11793,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11669 11793
11670 switch (plane->type) { 11794 switch (plane->type) {
11671 case DRM_PLANE_TYPE_PRIMARY: 11795 case DRM_PLANE_TYPE_PRIMARY:
11672 intel_crtc->atomic.wait_for_flips = true;
11673 intel_crtc->atomic.pre_disable_primary = turn_off; 11796 intel_crtc->atomic.pre_disable_primary = turn_off;
11674 intel_crtc->atomic.post_enable_primary = turn_on; 11797 intel_crtc->atomic.post_enable_primary = turn_on;
11675 11798
@@ -11717,11 +11840,23 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11717 case DRM_PLANE_TYPE_CURSOR: 11840 case DRM_PLANE_TYPE_CURSOR:
11718 break; 11841 break;
11719 case DRM_PLANE_TYPE_OVERLAY: 11842 case DRM_PLANE_TYPE_OVERLAY:
11720 if (turn_off && !mode_changed) { 11843 /*
11844 * WaCxSRDisabledForSpriteScaling:ivb
11845 *
11846 * cstate->update_wm was already set above, so this flag will
11847 * take effect when we commit and program watermarks.
11848 */
11849 if (IS_IVYBRIDGE(dev) &&
11850 needs_scaling(to_intel_plane_state(plane_state)) &&
11851 !needs_scaling(old_plane_state)) {
11852 to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
11853 } else if (turn_off && !mode_changed) {
11721 intel_crtc->atomic.wait_vblank = true; 11854 intel_crtc->atomic.wait_vblank = true;
11722 intel_crtc->atomic.update_sprite_watermarks |= 11855 intel_crtc->atomic.update_sprite_watermarks |=
11723 1 << i; 11856 1 << i;
11724 } 11857 }
11858
11859 break;
11725 } 11860 }
11726 return 0; 11861 return 0;
11727} 11862}
@@ -11806,6 +11941,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11806 } 11941 }
11807 11942
11808 ret = 0; 11943 ret = 0;
11944 if (dev_priv->display.compute_pipe_wm) {
11945 ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
11946 if (ret)
11947 return ret;
11948 }
11949
11809 if (INTEL_INFO(dev)->gen >= 9) { 11950 if (INTEL_INFO(dev)->gen >= 9) {
11810 if (mode_changed) 11951 if (mode_changed)
11811 ret = skl_update_scaler_crtc(pipe_config); 11952 ret = skl_update_scaler_crtc(pipe_config);
@@ -11995,7 +12136,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
11995 pipe_config->dpll_hw_state.pll9, 12136 pipe_config->dpll_hw_state.pll9,
11996 pipe_config->dpll_hw_state.pll10, 12137 pipe_config->dpll_hw_state.pll10,
11997 pipe_config->dpll_hw_state.pcsdw12); 12138 pipe_config->dpll_hw_state.pcsdw12);
11998 } else if (IS_SKYLAKE(dev)) { 12139 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
11999 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " 12140 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12000 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", 12141 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12001 pipe_config->ddi_pll_sel, 12142 pipe_config->ddi_pll_sel,
@@ -12003,9 +12144,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12003 pipe_config->dpll_hw_state.cfgcr1, 12144 pipe_config->dpll_hw_state.cfgcr1,
12004 pipe_config->dpll_hw_state.cfgcr2); 12145 pipe_config->dpll_hw_state.cfgcr2);
12005 } else if (HAS_DDI(dev)) { 12146 } else if (HAS_DDI(dev)) {
12006 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", 12147 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12007 pipe_config->ddi_pll_sel, 12148 pipe_config->ddi_pll_sel,
12008 pipe_config->dpll_hw_state.wrpll); 12149 pipe_config->dpll_hw_state.wrpll,
12150 pipe_config->dpll_hw_state.spll);
12009 } else { 12151 } else {
12010 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 12152 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12011 "fp0: 0x%x, fp1: 0x%x\n", 12153 "fp0: 0x%x, fp1: 0x%x\n",
@@ -12248,6 +12390,18 @@ intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12248 crtc->hwmode = crtc->state->adjusted_mode; 12390 crtc->hwmode = crtc->state->adjusted_mode;
12249 else 12391 else
12250 crtc->hwmode.crtc_clock = 0; 12392 crtc->hwmode.crtc_clock = 0;
12393
12394 /*
12395 * Update legacy state to satisfy fbc code. This can
12396 * be removed when fbc uses the atomic state.
12397 */
12398 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12399 struct drm_plane_state *plane_state = crtc->primary->state;
12400
12401 crtc->primary->fb = plane_state->fb;
12402 crtc->x = plane_state->src_x >> 16;
12403 crtc->y = plane_state->src_y >> 16;
12404 }
12251 } 12405 }
12252} 12406}
12253 12407
@@ -12273,7 +12427,7 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
12273 list_for_each_entry((intel_crtc), \ 12427 list_for_each_entry((intel_crtc), \
12274 &(dev)->mode_config.crtc_list, \ 12428 &(dev)->mode_config.crtc_list, \
12275 base.head) \ 12429 base.head) \
12276 if (mask & (1 <<(intel_crtc)->pipe)) 12430 for_each_if (mask & (1 <<(intel_crtc)->pipe))
12277 12431
12278static bool 12432static bool
12279intel_compare_m_n(unsigned int m, unsigned int n, 12433intel_compare_m_n(unsigned int m, unsigned int n,
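The for_each_intel_crtc_masked() hunk above swaps a bare trailing if for for_each_if(), which inverts the condition so that an else written after the macro's loop body cannot silently bind to the macro's hidden if. A minimal sketch of the pattern:

    /* Sketch of why for_each_if() exists: a naive "for (...) if (cond)"
     * macro leaves a dangling if that a later else would attach to. */
    #include <stdio.h>

    #define for_each_if(cond) if (!(cond)) {} else

    #define for_each_set_index(i, mask) \
        for ((i) = 0; (i) < 32; (i)++)  \
            for_each_if ((mask) & (1u << (i)))

    int main(void)
    {
        unsigned int mask = 0x0b;   /* bits 0, 1, 3 */
        int i;

        for_each_set_index(i, mask)
            printf("index %d set\n", i);
        return 0;
    }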
@@ -12452,12 +12606,13 @@ intel_pipe_config_compare(struct drm_device *dev,
12452 if (INTEL_INFO(dev)->gen < 8) { 12606 if (INTEL_INFO(dev)->gen < 8) {
12453 PIPE_CONF_CHECK_M_N(dp_m_n); 12607 PIPE_CONF_CHECK_M_N(dp_m_n);
12454 12608
12455 PIPE_CONF_CHECK_I(has_drrs);
12456 if (current_config->has_drrs) 12609 if (current_config->has_drrs)
12457 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12610 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12458 } else 12611 } else
12459 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12612 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12460 12613
12614 PIPE_CONF_CHECK_I(has_dsi_encoder);
12615
12461 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12616 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12462 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12617 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12463 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12618 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
@@ -12528,6 +12683,7 @@ intel_pipe_config_compare(struct drm_device *dev,
12528 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12683 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12529 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12684 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12530 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12685 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12686 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12531 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12687 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12532 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12688 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12533 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12689 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
@@ -13011,6 +13167,45 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13011 return 0; 13167 return 0;
13012} 13168}
13013 13169
13170/*
13171 * Handle calculation of various watermark data at the end of the atomic check
13172 * phase. The code here should be run after the per-crtc and per-plane 'check'
13173 * handlers to ensure that all derived state has been updated.
13174 */
13175static void calc_watermark_data(struct drm_atomic_state *state)
13176{
13177 struct drm_device *dev = state->dev;
13178 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13179 struct drm_crtc *crtc;
13180 struct drm_crtc_state *cstate;
13181 struct drm_plane *plane;
13182 struct drm_plane_state *pstate;
13183
13184 /*
13185 * Calculate watermark configuration details now that derived
13186 * plane/crtc state is all properly updated.
13187 */
13188 drm_for_each_crtc(crtc, dev) {
13189 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13190 crtc->state;
13191
13192 if (cstate->active)
13193 intel_state->wm_config.num_pipes_active++;
13194 }
13195 drm_for_each_legacy_plane(plane, dev) {
13196 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13197 plane->state;
13198
13199 if (!to_intel_plane_state(pstate)->visible)
13200 continue;
13201
13202 intel_state->wm_config.sprites_enabled = true;
13203 if (pstate->crtc_w != pstate->src_w >> 16 ||
13204 pstate->crtc_h != pstate->src_h >> 16)
13205 intel_state->wm_config.sprites_scaled = true;
13206 }
13207}
13208
13014/** 13209/**
13015 * intel_atomic_check - validate state object 13210 * intel_atomic_check - validate state object
13016 * @dev: drm device 13211 * @dev: drm device
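The new calc_watermark_data() above runs at the end of atomic check and folds derived per-CRTC and per-plane state into a single watermark configuration: the active pipe count, whether any plane is enabled, and whether any is scaled (destination sizes are in pixels, source sizes in 16.16 fixed point). A standalone sketch of the aggregation over invented planes:

    /* Illustrative aggregation only; the plane list is invented. */
    #include <stdbool.h>
    #include <stdio.h>

    struct plane {
        bool visible;
        unsigned int crtc_w, crtc_h;   /* destination, pixels */
        unsigned int src_w, src_h;     /* source, 16.16 fixed point */
    };

    int main(void)
    {
        struct plane planes[] = {
            { true,  1920, 1080, 1920 << 16, 1080 << 16 },  /* 1:1 */
            { true,  1920, 1080, 1280 << 16,  720 << 16 },  /* scaled */
            { false,    0,    0,          0,          0 },  /* skipped */
        };
        bool sprites_enabled = false, sprites_scaled = false;
        unsigned int i;

        for (i = 0; i < sizeof(planes) / sizeof(planes[0]); i++) {
            if (!planes[i].visible)
                continue;
            sprites_enabled = true;
            if (planes[i].crtc_w != planes[i].src_w >> 16 ||
                planes[i].crtc_h != planes[i].src_h >> 16)
                sprites_scaled = true;
        }
        printf("enabled=%d scaled=%d\n", sprites_enabled, sprites_scaled);
        return 0;
    }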
@@ -13019,6 +13214,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13019static int intel_atomic_check(struct drm_device *dev, 13214static int intel_atomic_check(struct drm_device *dev,
13020 struct drm_atomic_state *state) 13215 struct drm_atomic_state *state)
13021{ 13216{
13217 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13022 struct drm_crtc *crtc; 13218 struct drm_crtc *crtc;
13023 struct drm_crtc_state *crtc_state; 13219 struct drm_crtc_state *crtc_state;
13024 int ret, i; 13220 int ret, i;
@@ -13032,6 +13228,9 @@ static int intel_atomic_check(struct drm_device *dev,
13032 struct intel_crtc_state *pipe_config = 13228 struct intel_crtc_state *pipe_config =
13033 to_intel_crtc_state(crtc_state); 13229 to_intel_crtc_state(crtc_state);
13034 13230
13231 memset(&to_intel_crtc(crtc)->atomic, 0,
13232 sizeof(struct intel_crtc_atomic_commit));
13233
13035 /* Catch I915_MODE_FLAG_INHERITED */ 13234 /* Catch I915_MODE_FLAG_INHERITED */
13036 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13235 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13037 crtc_state->mode_changed = true; 13236 crtc_state->mode_changed = true;
@@ -13056,7 +13255,8 @@ static int intel_atomic_check(struct drm_device *dev,
13056 if (ret) 13255 if (ret)
13057 return ret; 13256 return ret;
13058 13257
13059 if (intel_pipe_config_compare(state->dev, 13258 if (i915.fastboot &&
13259 intel_pipe_config_compare(state->dev,
13060 to_intel_crtc_state(crtc->state), 13260 to_intel_crtc_state(crtc->state),
13061 pipe_config, true)) { 13261 pipe_config, true)) {
13062 crtc_state->mode_changed = false; 13262 crtc_state->mode_changed = false;
@@ -13082,10 +13282,81 @@ static int intel_atomic_check(struct drm_device *dev,
13082 if (ret) 13282 if (ret)
13083 return ret; 13283 return ret;
13084 } else 13284 } else
13085 to_intel_atomic_state(state)->cdclk = 13285 intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
13086 to_i915(state->dev)->cdclk_freq; 13286
13287 ret = drm_atomic_helper_check_planes(state->dev, state);
13288 if (ret)
13289 return ret;
13290
13291 calc_watermark_data(state);
13292
13293 return 0;
13294}
13295
13296static int intel_atomic_prepare_commit(struct drm_device *dev,
13297 struct drm_atomic_state *state,
13298 bool async)
13299{
13300 struct drm_i915_private *dev_priv = dev->dev_private;
13301 struct drm_plane_state *plane_state;
13302 struct drm_crtc_state *crtc_state;
13303 struct drm_plane *plane;
13304 struct drm_crtc *crtc;
13305 int i, ret;
13087 13306
13088 return drm_atomic_helper_check_planes(state->dev, state); 13307 if (async) {
13308 DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13309 return -EINVAL;
13310 }
13311
13312 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13313 ret = intel_crtc_wait_for_pending_flips(crtc);
13314 if (ret)
13315 return ret;
13316
13317 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13318 flush_workqueue(dev_priv->wq);
13319 }
13320
13321 ret = mutex_lock_interruptible(&dev->struct_mutex);
13322 if (ret)
13323 return ret;
13324
13325 ret = drm_atomic_helper_prepare_planes(dev, state);
13326 if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
13327 u32 reset_counter;
13328
13329 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
13330 mutex_unlock(&dev->struct_mutex);
13331
13332 for_each_plane_in_state(state, plane, plane_state, i) {
13333 struct intel_plane_state *intel_plane_state =
13334 to_intel_plane_state(plane_state);
13335
13336 if (!intel_plane_state->wait_req)
13337 continue;
13338
13339 ret = __i915_wait_request(intel_plane_state->wait_req,
13340 reset_counter, true,
13341 NULL, NULL);
13342
13343 /* Swallow -EIO errors to allow updates during hw lockup. */
13344 if (ret == -EIO)
13345 ret = 0;
13346
13347 if (ret)
13348 break;
13349 }
13350
13351 if (!ret)
13352 return 0;
13353
13354 mutex_lock(&dev->struct_mutex);
13355 drm_atomic_helper_cleanup_planes(dev, state);
13356 }
13357
13358 mutex_unlock(&dev->struct_mutex);
13359 return ret;
13089} 13360}
13090 13361
13091/** 13362/**
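The new intel_atomic_prepare_commit() above waits for each plane's outstanding render request before the commit runs, and deliberately swallows -EIO so that a wedged GPU cannot block display updates; any other error unwinds the prepared planes. A small sketch of that error policy, with the wait stubbed out:

    /* Sketch of the wait loop's error policy; wait_for_request() is a
     * stub returning a canned result per plane. */
    #include <stdio.h>

    #define EIO 5

    static int wait_for_request(int plane)
    {
        /* pretend plane 1's wait hit a GPU hang */
        return plane == 1 ? -EIO : 0;
    }

    int main(void)
    {
        int plane, ret = 0;

        for (plane = 0; plane < 3; plane++) {
            ret = wait_for_request(plane);
            if (ret == -EIO)
                ret = 0;    /* allow updates during hw lockup */
            if (ret)
                break;      /* real error: caller unwinds the planes */
        }
        printf("prepare result: %d\n", ret);   /* 0: commit proceeds */
        return 0;
    }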
@@ -13109,22 +13380,20 @@ static int intel_atomic_commit(struct drm_device *dev,
13109 bool async) 13380 bool async)
13110{ 13381{
13111 struct drm_i915_private *dev_priv = dev->dev_private; 13382 struct drm_i915_private *dev_priv = dev->dev_private;
13112 struct drm_crtc *crtc;
13113 struct drm_crtc_state *crtc_state; 13383 struct drm_crtc_state *crtc_state;
13384 struct drm_crtc *crtc;
13114 int ret = 0; 13385 int ret = 0;
13115 int i; 13386 int i;
13116 bool any_ms = false; 13387 bool any_ms = false;
13117 13388
13118 if (async) { 13389 ret = intel_atomic_prepare_commit(dev, state, async);
13119 DRM_DEBUG_KMS("i915 does not yet support async commit\n"); 13390 if (ret) {
13120 return -EINVAL; 13391 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13121 }
13122
13123 ret = drm_atomic_helper_prepare_planes(dev, state);
13124 if (ret)
13125 return ret; 13392 return ret;
13393 }
13126 13394
13127 drm_atomic_helper_swap_state(dev, state); 13395 drm_atomic_helper_swap_state(dev, state);
13396 dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
13128 13397
13129 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13398 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13130 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -13140,6 +13409,13 @@ static int intel_atomic_commit(struct drm_device *dev,
13140 dev_priv->display.crtc_disable(crtc); 13409 dev_priv->display.crtc_disable(crtc);
13141 intel_crtc->active = false; 13410 intel_crtc->active = false;
13142 intel_disable_shared_dpll(intel_crtc); 13411 intel_disable_shared_dpll(intel_crtc);
13412
13413 /*
13414 * Underruns don't always raise
13415 * interrupts, so check manually.
13416 */
13417 intel_check_cpu_fifo_underruns(dev_priv);
13418 intel_check_pch_fifo_underruns(dev_priv);
13143 } 13419 }
13144 } 13420 }
13145 13421
@@ -13162,6 +13438,9 @@ static int intel_atomic_commit(struct drm_device *dev,
13162 to_intel_crtc_state(crtc->state)->update_pipe; 13438 to_intel_crtc_state(crtc->state)->update_pipe;
13163 unsigned long put_domains = 0; 13439 unsigned long put_domains = 0;
13164 13440
13441 if (modeset)
13442 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13443
13165 if (modeset && crtc->state->active) { 13444 if (modeset && crtc->state->active) {
13166 update_scanline_offset(to_intel_crtc(crtc)); 13445 update_scanline_offset(to_intel_crtc(crtc));
13167 dev_priv->display.crtc_enable(crtc); 13446 dev_priv->display.crtc_enable(crtc);
@@ -13177,18 +13456,26 @@ static int intel_atomic_commit(struct drm_device *dev,
13177 if (!modeset) 13456 if (!modeset)
13178 intel_pre_plane_update(intel_crtc); 13457 intel_pre_plane_update(intel_crtc);
13179 13458
13180 drm_atomic_helper_commit_planes_on_crtc(crtc_state); 13459 if (crtc->state->active &&
13460 (crtc->state->planes_changed || update_pipe))
13461 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13181 13462
13182 if (put_domains) 13463 if (put_domains)
13183 modeset_put_power_domains(dev_priv, put_domains); 13464 modeset_put_power_domains(dev_priv, put_domains);
13184 13465
13185 intel_post_plane_update(intel_crtc); 13466 intel_post_plane_update(intel_crtc);
13467
13468 if (modeset)
13469 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13186 } 13470 }
13187 13471
13188 /* FIXME: add subpixel order */ 13472 /* FIXME: add subpixel order */
13189 13473
13190 drm_atomic_helper_wait_for_vblanks(dev, state); 13474 drm_atomic_helper_wait_for_vblanks(dev, state);
13475
13476 mutex_lock(&dev->struct_mutex);
13191 drm_atomic_helper_cleanup_planes(dev, state); 13477 drm_atomic_helper_cleanup_planes(dev, state);
13478 mutex_unlock(&dev->struct_mutex);
13192 13479
13193 if (any_ms) 13480 if (any_ms)
13194 intel_modeset_check_state(dev, state); 13481 intel_modeset_check_state(dev, state);
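In intel_atomic_commit() the hunks above take a POWER_DOMAIN_MODESET reference for the duration of each modeset, skip committing planes on CRTCs that are inactive or untouched, and wrap plane cleanup in struct_mutex. A toy sketch of the get/put bracketing:

    /* Toy power-domain bracketing around a modeset; the counter stands
     * in for a real reference-counted power well. */
    #include <stdbool.h>
    #include <stdio.h>

    static int modeset_power_refs;

    static void power_get(void) { modeset_power_refs++; }
    static void power_put(void) { modeset_power_refs--; }

    static void commit_crtc(bool modeset, bool active)
    {
        if (modeset)
            power_get();        /* keep wells up across the modeset */

        if (active)
            printf("commit planes\n");
        else
            printf("skip plane commit\n");  /* crtc off: nothing to flush */

        if (modeset)
            power_put();
    }

    int main(void)
    {
        commit_crtc(true, true);
        commit_crtc(false, false);
        printf("refs=%d\n", modeset_power_refs);   /* balanced: 0 */
        return 0;
    }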
@@ -13357,6 +13644,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
13357 * bits. Some older platforms need special physical address handling for 13644 * bits. Some older platforms need special physical address handling for
13358 * cursor planes. 13645 * cursor planes.
13359 * 13646 *
13647 * Must be called with struct_mutex held.
13648 *
13360 * Returns 0 on success, negative error code on failure. 13649 * Returns 0 on success, negative error code on failure.
13361 */ 13650 */
13362int 13651int
@@ -13367,28 +13656,69 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13367 struct drm_framebuffer *fb = new_state->fb; 13656 struct drm_framebuffer *fb = new_state->fb;
13368 struct intel_plane *intel_plane = to_intel_plane(plane); 13657 struct intel_plane *intel_plane = to_intel_plane(plane);
13369 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13658 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13370 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 13659 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13371 int ret = 0; 13660 int ret = 0;
13372 13661
13373 if (!obj) 13662 if (!obj && !old_obj)
13374 return 0; 13663 return 0;
13375 13664
13376 mutex_lock(&dev->struct_mutex); 13665 if (old_obj) {
13666 struct drm_crtc_state *crtc_state =
13667 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13668
13669 /* Big Hammer, we also need to ensure that any pending
13670 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13671 * current scanout is retired before unpinning the old
13672 * framebuffer. Note that we rely on userspace rendering
13673 * into the buffer attached to the pipe they are waiting
13674 * on. If not, userspace generates a GPU hang with IPEHR
13675 * pointing to the MI_WAIT_FOR_EVENT.
13676 *
13677 * This should only fail upon a hung GPU, in which case we
13678 * can safely continue.
13679 */
13680 if (needs_modeset(crtc_state))
13681 ret = i915_gem_object_wait_rendering(old_obj, true);
13682
13683 /* Swallow -EIO errors to allow updates during hw lockup. */
13684 if (ret && ret != -EIO)
13685 return ret;
13686 }
13687
13688 /* For framebuffer backed by dmabuf, wait for fence */
13689 if (obj && obj->base.dma_buf) {
13690 ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13691 false, true,
13692 MAX_SCHEDULE_TIMEOUT);
13693 if (ret == -ERESTARTSYS)
13694 return ret;
13377 13695
13378 if (plane->type == DRM_PLANE_TYPE_CURSOR && 13696 WARN_ON(ret < 0);
13697 }
13698
13699 if (!obj) {
13700 ret = 0;
13701 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13379 INTEL_INFO(dev)->cursor_needs_physical) { 13702 INTEL_INFO(dev)->cursor_needs_physical) {
13380 int align = IS_I830(dev) ? 16 * 1024 : 256; 13703 int align = IS_I830(dev) ? 16 * 1024 : 256;
13381 ret = i915_gem_object_attach_phys(obj, align); 13704 ret = i915_gem_object_attach_phys(obj, align);
13382 if (ret) 13705 if (ret)
13383 DRM_DEBUG_KMS("failed to attach phys object\n"); 13706 DRM_DEBUG_KMS("failed to attach phys object\n");
13384 } else { 13707 } else {
13385 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL); 13708 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
13386 } 13709 }
13387 13710
13388 if (ret == 0) 13711 if (ret == 0) {
13389 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); 13712 if (obj) {
13713 struct intel_plane_state *plane_state =
13714 to_intel_plane_state(new_state);
13390 13715
13391 mutex_unlock(&dev->struct_mutex); 13716 i915_gem_request_assign(&plane_state->wait_req,
13717 obj->last_write_req);
13718 }
13719
13720 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13721 }
13392 13722
13393 return ret; 13723 return ret;
13394} 13724}
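The reworked intel_prepare_plane_fb() above also records the object's last write request in the plane state via i915_gem_request_assign(), which is what the prepare-commit wait loop earlier in this diff consumes; intel_cleanup_plane_fb() clears it again. A toy refcounted handoff in the same style (all names hypothetical):

    /* Toy refcounted request handoff: assign takes a reference for the
     * slot and drops the previous occupant's. */
    #include <stddef.h>
    #include <stdio.h>

    struct request { int refcount; };

    static void request_assign(struct request **slot, struct request *req)
    {
        if (req)
            req->refcount++;            /* reference for the slot */
        if (*slot)
            (*slot)->refcount--;        /* drop the old occupant's */
        *slot = req;
    }

    int main(void)
    {
        struct request last_write = { .refcount = 1 };
        struct request *wait_req = NULL;

        request_assign(&wait_req, &last_write);   /* prepare_fb */
        /* ... the commit path waits on wait_req here ... */
        request_assign(&wait_req, NULL);          /* cleanup_fb */

        printf("refcount=%d\n", last_write.refcount);  /* back to 1 */
        return 0;
    }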
@@ -13399,23 +13729,35 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13399 * @fb: old framebuffer that was on plane 13729 * @fb: old framebuffer that was on plane
13400 * 13730 *
13401 * Cleans up a framebuffer that has just been removed from a plane. 13731 * Cleans up a framebuffer that has just been removed from a plane.
13732 *
13733 * Must be called with struct_mutex held.
13402 */ 13734 */
13403void 13735void
13404intel_cleanup_plane_fb(struct drm_plane *plane, 13736intel_cleanup_plane_fb(struct drm_plane *plane,
13405 const struct drm_plane_state *old_state) 13737 const struct drm_plane_state *old_state)
13406{ 13738{
13407 struct drm_device *dev = plane->dev; 13739 struct drm_device *dev = plane->dev;
13408 struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb); 13740 struct intel_plane *intel_plane = to_intel_plane(plane);
13741 struct intel_plane_state *old_intel_state;
13742 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13743 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13409 13744
13410 if (!obj) 13745 old_intel_state = to_intel_plane_state(old_state);
13746
13747 if (!obj && !old_obj)
13411 return; 13748 return;
13412 13749
13413 if (plane->type != DRM_PLANE_TYPE_CURSOR || 13750 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13414 !INTEL_INFO(dev)->cursor_needs_physical) { 13751 !INTEL_INFO(dev)->cursor_needs_physical))
13415 mutex_lock(&dev->struct_mutex);
13416 intel_unpin_fb_obj(old_state->fb, old_state); 13752 intel_unpin_fb_obj(old_state->fb, old_state);
13417 mutex_unlock(&dev->struct_mutex); 13753
13418 } 13754 /* prepare_fb aborted? */
13755 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13756 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13757 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13758
13759 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13760
13419} 13761}
13420 13762
13421int 13763int
@@ -13434,7 +13776,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
13434 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 13776 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13435 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; 13777 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13436 13778
13437 if (!crtc_clock || !cdclk) 13779 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13438 return DRM_PLANE_HELPER_NO_SCALING; 13780 return DRM_PLANE_HELPER_NO_SCALING;
13439 13781
13440 /* 13782 /*
@@ -13482,18 +13824,8 @@ intel_commit_primary_plane(struct drm_plane *plane,
13482 struct drm_framebuffer *fb = state->base.fb; 13824 struct drm_framebuffer *fb = state->base.fb;
13483 struct drm_device *dev = plane->dev; 13825 struct drm_device *dev = plane->dev;
13484 struct drm_i915_private *dev_priv = dev->dev_private; 13826 struct drm_i915_private *dev_priv = dev->dev_private;
13485 struct intel_crtc *intel_crtc;
13486 struct drm_rect *src = &state->src;
13487 13827
13488 crtc = crtc ? crtc : plane->crtc; 13828 crtc = crtc ? crtc : plane->crtc;
13489 intel_crtc = to_intel_crtc(crtc);
13490
13491 plane->fb = fb;
13492 crtc->x = src->x1 >> 16;
13493 crtc->y = src->y1 >> 16;
13494
13495 if (!crtc->state->active)
13496 return;
13497 13829
13498 dev_priv->display.update_primary_plane(crtc, fb, 13830 dev_priv->display.update_primary_plane(crtc, fb,
13499 state->src.x1 >> 16, 13831 state->src.x1 >> 16,
@@ -13523,8 +13855,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13523 intel_update_watermarks(crtc); 13855 intel_update_watermarks(crtc);
13524 13856
13525 /* Perform vblank evasion around commit operation */ 13857 /* Perform vblank evasion around commit operation */
13526 if (crtc->state->active) 13858 intel_pipe_update_start(intel_crtc);
13527 intel_pipe_update_start(intel_crtc);
13528 13859
13529 if (modeset) 13860 if (modeset)
13530 return; 13861 return;
@@ -13540,8 +13871,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13540{ 13871{
13541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13872 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13542 13873
13543 if (crtc->state->active) 13874 intel_pipe_update_end(intel_crtc);
13544 intel_pipe_update_end(intel_crtc);
13545} 13875}
13546 13876
13547/** 13877/**
@@ -13618,7 +13948,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
13618 drm_universal_plane_init(dev, &primary->base, 0, 13948 drm_universal_plane_init(dev, &primary->base, 0,
13619 &intel_plane_funcs, 13949 &intel_plane_funcs,
13620 intel_primary_formats, num_formats, 13950 intel_primary_formats, num_formats,
13621 DRM_PLANE_TYPE_PRIMARY); 13951 DRM_PLANE_TYPE_PRIMARY, NULL);
13622 13952
13623 if (INTEL_INFO(dev)->gen >= 4) 13953 if (INTEL_INFO(dev)->gen >= 4)
13624 intel_create_rotation_property(dev, primary); 13954 intel_create_rotation_property(dev, primary);
@@ -13724,8 +14054,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
13724 intel_crtc->cursor_bo = obj; 14054 intel_crtc->cursor_bo = obj;
13725 14055
13726update: 14056update:
13727 if (crtc->state->active) 14057 intel_crtc_update_cursor(crtc, state->visible);
13728 intel_crtc_update_cursor(crtc, state->visible);
13729} 14058}
13730 14059
13731static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 14060static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -13758,7 +14087,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
13758 &intel_plane_funcs, 14087 &intel_plane_funcs,
13759 intel_cursor_formats, 14088 intel_cursor_formats,
13760 ARRAY_SIZE(intel_cursor_formats), 14089 ARRAY_SIZE(intel_cursor_formats),
13761 DRM_PLANE_TYPE_CURSOR); 14090 DRM_PLANE_TYPE_CURSOR, NULL);
13762 14091
13763 if (INTEL_INFO(dev)->gen >= 4) { 14092 if (INTEL_INFO(dev)->gen >= 4) {
13764 if (!dev->mode_config.rotation_property) 14093 if (!dev->mode_config.rotation_property)
@@ -13835,7 +14164,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
13835 goto fail; 14164 goto fail;
13836 14165
13837 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14166 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
13838 cursor, &intel_crtc_funcs); 14167 cursor, &intel_crtc_funcs, NULL);
13839 if (ret) 14168 if (ret)
13840 goto fail; 14169 goto fail;
13841 14170
@@ -13961,7 +14290,14 @@ static bool intel_crt_present(struct drm_device *dev)
13961 if (IS_CHERRYVIEW(dev)) 14290 if (IS_CHERRYVIEW(dev))
13962 return false; 14291 return false;
13963 14292
13964 if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support) 14293 if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14294 return false;
14295
14296 /* DDI E can't be used if DDI A requires 4 lanes */
14297 if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14298 return false;
14299
14300 if (!dev_priv->vbt.int_crt_support)
13965 return false; 14301 return false;
13966 14302
13967 return true; 14303 return true;
@@ -13997,7 +14333,7 @@ static void intel_setup_outputs(struct drm_device *dev)
13997 */ 14333 */
13998 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14334 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
13999 /* WaIgnoreDDIAStrap: skl */ 14335 /* WaIgnoreDDIAStrap: skl */
14000 if (found || IS_SKYLAKE(dev)) 14336 if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14001 intel_ddi_init(dev, PORT_A); 14337 intel_ddi_init(dev, PORT_A);
14002 14338
14003 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 14339 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -14013,7 +14349,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14013 /* 14349 /*
14014 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 14350 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14015 */ 14351 */
14016 if (IS_SKYLAKE(dev) && 14352 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14017 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 14353 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14018 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 14354 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14019 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 14355 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
@@ -14028,7 +14364,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14028 14364
14029 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 14365 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14030 /* PCH SDVOB multiplex with HDMIB */ 14366 /* PCH SDVOB multiplex with HDMIB */
14031 found = intel_sdvo_init(dev, PCH_SDVOB, true); 14367 found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14032 if (!found) 14368 if (!found)
14033 intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 14369 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14034 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 14370 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -14084,7 +14420,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14084 14420
14085 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14421 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14086 DRM_DEBUG_KMS("probing SDVOB\n"); 14422 DRM_DEBUG_KMS("probing SDVOB\n");
14087 found = intel_sdvo_init(dev, GEN3_SDVOB, true); 14423 found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14088 if (!found && IS_G4X(dev)) { 14424 if (!found && IS_G4X(dev)) {
14089 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14425 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14090 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 14426 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
@@ -14098,7 +14434,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14098 14434
14099 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14435 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14100 DRM_DEBUG_KMS("probing SDVOC\n"); 14436 DRM_DEBUG_KMS("probing SDVOC\n");
14101 found = intel_sdvo_init(dev, GEN3_SDVOC, false); 14437 found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14102 } 14438 }
14103 14439
14104 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14440 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
@@ -14364,16 +14700,22 @@ static int intel_framebuffer_init(struct drm_device *dev,
14364static struct drm_framebuffer * 14700static struct drm_framebuffer *
14365intel_user_framebuffer_create(struct drm_device *dev, 14701intel_user_framebuffer_create(struct drm_device *dev,
14366 struct drm_file *filp, 14702 struct drm_file *filp,
14367 struct drm_mode_fb_cmd2 *mode_cmd) 14703 const struct drm_mode_fb_cmd2 *user_mode_cmd)
14368{ 14704{
14705 struct drm_framebuffer *fb;
14369 struct drm_i915_gem_object *obj; 14706 struct drm_i915_gem_object *obj;
14707 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14370 14708
14371 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 14709 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14372 mode_cmd->handles[0])); 14710 mode_cmd.handles[0]));
14373 if (&obj->base == NULL) 14711 if (&obj->base == NULL)
14374 return ERR_PTR(-ENOENT); 14712 return ERR_PTR(-ENOENT);
14375 14713
14376 return intel_framebuffer_create(dev, mode_cmd, obj); 14714 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14715 if (IS_ERR(fb))
14716 drm_gem_object_unreference_unlocked(&obj->base);
14717
14718 return fb;
14377} 14719}
14378 14720
14379#ifndef CONFIG_DRM_FBDEV_EMULATION 14721#ifndef CONFIG_DRM_FBDEV_EMULATION
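intel_user_framebuffer_create() now takes a const drm_mode_fb_cmd2, matching the prototype change this series makes, so it works on a local copy; like the earlier framebuffer hunks, it also drops its own GEM reference on error. A sketch of the const-parameter copy pattern (the fixup shown is hypothetical):

    /* Sketch of the const-parameter pattern: copy the caller's struct so
     * later code may adjust fields without casting away const. */
    #include <stdio.h>

    struct fb_cmd { unsigned int width, height, pitch; };

    static int create_fb(const struct fb_cmd *user_cmd)
    {
        struct fb_cmd cmd = *user_cmd;  /* private, mutable copy */

        if (cmd.pitch == 0)
            cmd.pitch = cmd.width * 4;  /* hypothetical fixup */
        printf("%ux%u pitch %u\n", cmd.width, cmd.height, cmd.pitch);
        return 0;
    }

    int main(void)
    {
        const struct fb_cmd cmd = { 1920, 1080, 0 };
        return create_fb(&cmd);
    }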
@@ -14458,7 +14800,7 @@ static void intel_init_display(struct drm_device *dev)
14458 } 14800 }
14459 14801
14460 /* Returns the core display clock speed */ 14802 /* Returns the core display clock speed */
14461 if (IS_SKYLAKE(dev)) 14803 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14462 dev_priv->display.get_display_clock_speed = 14804 dev_priv->display.get_display_clock_speed =
14463 skylake_get_display_clock_speed; 14805 skylake_get_display_clock_speed;
14464 else if (IS_BROXTON(dev)) 14806 else if (IS_BROXTON(dev))
@@ -14498,9 +14840,6 @@ static void intel_init_display(struct drm_device *dev)
14498 else if (IS_I945GM(dev) || IS_845G(dev)) 14840 else if (IS_I945GM(dev) || IS_845G(dev))
14499 dev_priv->display.get_display_clock_speed = 14841 dev_priv->display.get_display_clock_speed =
14500 i9xx_misc_get_display_clock_speed; 14842 i9xx_misc_get_display_clock_speed;
14501 else if (IS_PINEVIEW(dev))
14502 dev_priv->display.get_display_clock_speed =
14503 pnv_get_display_clock_speed;
14504 else if (IS_I915GM(dev)) 14843 else if (IS_I915GM(dev))
14505 dev_priv->display.get_display_clock_speed = 14844 dev_priv->display.get_display_clock_speed =
14506 i915gm_get_display_clock_speed; 14845 i915gm_get_display_clock_speed;
@@ -14705,6 +15044,9 @@ static struct intel_quirk intel_quirks[] = {
14705 /* Apple Macbook 2,1 (Core 2 T7400) */ 15044 /* Apple Macbook 2,1 (Core 2 T7400) */
14706 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 15045 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
14707 15046
15047 /* Apple Macbook 4,1 */
15048 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15049
14708 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 15050 /* Toshiba CB35 Chromebook (Celeron 2955U) */
14709 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 15051 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
14710 15052
@@ -14744,7 +15086,7 @@ static void i915_disable_vga(struct drm_device *dev)
14744{ 15086{
14745 struct drm_i915_private *dev_priv = dev->dev_private; 15087 struct drm_i915_private *dev_priv = dev->dev_private;
14746 u8 sr1; 15088 u8 sr1;
14747 u32 vga_reg = i915_vgacntrl_reg(dev); 15089 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
14748 15090
14749 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 15091 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
14750 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 15092 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -14860,9 +15202,6 @@ void intel_modeset_init(struct drm_device *dev)
14860 i915_disable_vga(dev); 15202 i915_disable_vga(dev);
14861 intel_setup_outputs(dev); 15203 intel_setup_outputs(dev);
14862 15204
14863 /* Just in case the BIOS is doing something questionable. */
14864 intel_fbc_disable(dev_priv);
14865
14866 drm_modeset_lock_all(dev); 15205 drm_modeset_lock_all(dev);
14867 intel_modeset_setup_hw_state(dev); 15206 intel_modeset_setup_hw_state(dev);
14868 drm_modeset_unlock_all(dev); 15207 drm_modeset_unlock_all(dev);
@@ -14949,10 +15288,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
14949{ 15288{
14950 struct drm_device *dev = crtc->base.dev; 15289 struct drm_device *dev = crtc->base.dev;
14951 struct drm_i915_private *dev_priv = dev->dev_private; 15290 struct drm_i915_private *dev_priv = dev->dev_private;
14952 u32 reg; 15291 i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
14953 15292
14954 /* Clear any frame start delays used for debugging left by the BIOS */ 15293 /* Clear any frame start delays used for debugging left by the BIOS */
14955 reg = PIPECONF(crtc->config->cpu_transcoder);
14956 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 15294 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
14957 15295
14958 /* restore vblank interrupts to correct state */ 15296 /* restore vblank interrupts to correct state */
@@ -15106,7 +15444,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15106void i915_redisable_vga_power_on(struct drm_device *dev) 15444void i915_redisable_vga_power_on(struct drm_device *dev)
15107{ 15445{
15108 struct drm_i915_private *dev_priv = dev->dev_private; 15446 struct drm_i915_private *dev_priv = dev->dev_private;
15109 u32 vga_reg = i915_vgacntrl_reg(dev); 15447 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15110 15448
15111 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15449 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15112 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15450 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
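Several hunks here (vga_reg, PIPECONF, and the PP/AUX registers in intel_dp.c below) convert bare u32 register offsets to i915_reg_t. Wrapping the offset in a one-member struct turns mixing up offsets and register values into a compile error. A standalone sketch of the technique (reg_t and REG() are stand-ins):

    /* Sketch of typed register offsets: the wrapper struct stops
     * accidental mixing of offsets and register values. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t reg; } reg_t;

    #define REG(offset) ((reg_t){ .reg = (offset) })

    static uint32_t read_reg(reg_t r)
    {
        /* stand-in for an MMIO read */
        printf("read  0x%05x\n", r.reg);
        return 0;
    }

    static void write_reg(reg_t r, uint32_t val)
    {
        printf("write 0x%05x = 0x%08x\n", r.reg, val);
    }

    int main(void)
    {
        reg_t vga = REG(0x71400);

        write_reg(vga, read_reg(vga) | (1u << 31));
        /* write_reg(0x71400, 0); would now fail to compile */
        return 0;
    }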
@@ -15145,7 +15483,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
15145 struct intel_plane_state *plane_state = 15483 struct intel_plane_state *plane_state =
15146 to_intel_plane_state(primary->state); 15484 to_intel_plane_state(primary->state);
15147 15485
15148 plane_state->visible = 15486 plane_state->visible = crtc->active &&
15149 primary_get_hw_state(to_intel_plane(primary)); 15487 primary_get_hw_state(to_intel_plane(primary));
15150 15488
15151 if (plane_state->visible) 15489 if (plane_state->visible)
@@ -15402,8 +15740,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
15402 mutex_lock(&dev->struct_mutex); 15740 mutex_lock(&dev->struct_mutex);
15403 ret = intel_pin_and_fence_fb_obj(c->primary, 15741 ret = intel_pin_and_fence_fb_obj(c->primary,
15404 c->primary->fb, 15742 c->primary->fb,
15405 c->primary->state, 15743 c->primary->state);
15406 NULL, NULL);
15407 mutex_unlock(&dev->struct_mutex); 15744 mutex_unlock(&dev->struct_mutex);
15408 if (ret) { 15745 if (ret) {
15409 DRM_ERROR("failed to pin boot fb on pipe %d\n", 15746 DRM_ERROR("failed to pin boot fb on pipe %d\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09bdd94ca3ba..0f0573aa1b0d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp)
277 * See vlv_power_sequencer_reset() why we need 277 * See vlv_power_sequencer_reset() why we need
278 * a power domain reference here. 278 * a power domain reference here.
279 */ 279 */
280 power_domain = intel_display_port_power_domain(encoder); 280 power_domain = intel_display_port_aux_power_domain(encoder);
281 intel_display_power_get(dev_priv, power_domain); 281 intel_display_power_get(dev_priv, power_domain);
282 282
283 mutex_lock(&dev_priv->pps_mutex); 283 mutex_lock(&dev_priv->pps_mutex);
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
293 293
294 mutex_unlock(&dev_priv->pps_mutex); 294 mutex_unlock(&dev_priv->pps_mutex);
295 295
296 power_domain = intel_display_port_power_domain(encoder); 296 power_domain = intel_display_port_aux_power_domain(encoder);
297 intel_display_power_put(dev_priv, power_domain); 297 intel_display_power_put(dev_priv, power_domain);
298} 298}
299 299
@@ -541,7 +541,8 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
541 } 541 }
542} 542}
543 543
544static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 544static i915_reg_t
545_pp_ctrl_reg(struct intel_dp *intel_dp)
545{ 546{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp); 547 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547 548
@@ -553,7 +554,8 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); 554 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554} 555}
555 556
556static u32 _pp_stat_reg(struct intel_dp *intel_dp) 557static i915_reg_t
558_pp_stat_reg(struct intel_dp *intel_dp)
557{ 559{
558 struct drm_device *dev = intel_dp_to_dev(intel_dp); 560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
559 561
@@ -582,7 +584,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
582 584
583 if (IS_VALLEYVIEW(dev)) { 585 if (IS_VALLEYVIEW(dev)) {
584 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 586 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
585 u32 pp_ctrl_reg, pp_div_reg; 587 i915_reg_t pp_ctrl_reg, pp_div_reg;
586 u32 pp_div; 588 u32 pp_div;
587 589
588 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 590 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
@@ -652,7 +654,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 654 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653 struct drm_device *dev = intel_dig_port->base.base.dev; 655 struct drm_device *dev = intel_dig_port->base.base.dev;
654 struct drm_i915_private *dev_priv = dev->dev_private; 656 struct drm_i915_private *dev_priv = dev->dev_private;
655 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 657 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
656 uint32_t status; 658 uint32_t status;
657 bool done; 659 bool done;
658 660
@@ -679,7 +681,7 @@ static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
679 * The clock divider is based off the hrawclk, and would like to run at 681 * The clock divider is based off the hrawclk, and would like to run at
680 * 2MHz. So, take the hrawclk value and divide by 2 and use that 682 * 2MHz. So, take the hrawclk value and divide by 2 and use that
681 */ 683 */
682 return index ? 0 : intel_hrawclk(dev) / 2; 684 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
683} 685}
684 686
685static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 687static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -692,10 +694,10 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
692 return 0; 694 return 0;
693 695
694 if (intel_dig_port->port == PORT_A) { 696 if (intel_dig_port->port == PORT_A) {
695 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000); 697 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
696 698
697 } else { 699 } else {
698 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 700 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
699 } 701 }
700} 702}
701 703
@@ -709,7 +711,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
709 if (index) 711 if (index)
710 return 0; 712 return 0;
711 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); 713 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
712 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 714 } else if (HAS_PCH_LPT_H(dev_priv)) {
713 /* Workaround for non-ULT HSW */ 715 /* Workaround for non-ULT HSW */
714 switch (index) { 716 switch (index) {
715 case 0: return 63; 717 case 0: return 63;
@@ -717,7 +719,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
717 default: return 0; 719 default: return 0;
718 } 720 }
719 } else { 721 } else {
720 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 722 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
721 } 723 }
722} 724}
723 725
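The AUX clock-divider hunks replace DIV_ROUND_UP and one truncating division with DIV_ROUND_CLOSEST, so the divider lands nearest the 2 MHz AUX target instead of always rounding up (or down). The difference, using example input clocks:

    /* Illustrative only: 308571 kHz and 125 MHz are example inputs. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

    int main(void)
    {
        printf("cdclk/2000: up=%d closest=%d\n",
               DIV_ROUND_UP(308571, 2000),        /* 155 */
               DIV_ROUND_CLOSEST(308571, 2000));  /* 154 */
        printf("rawclk/2:   trunc=%d closest=%d\n",
               125 / 2,                           /* 62 */
               DIV_ROUND_CLOSEST(125, 2));        /* 63 */
        return 0;
    }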
@@ -750,7 +752,7 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
 	else
 		precharge = 5;
 
-	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 	else
 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -789,8 +791,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
-	uint32_t ch_data = ch_ctl + 4;
+	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
 	uint32_t aux_clock_divider;
 	int i, ret, recv_bytes;
 	uint32_t status;
@@ -816,8 +817,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 
 	intel_dp_check_edp(intel_dp);
 
-	intel_aux_display_runtime_get(dev_priv);
-
 	/* Try to wait for any previous AUX channel activity */
 	for (try = 0; try < 3; try++) {
 		status = I915_READ_NOTRACE(ch_ctl);
@@ -856,7 +855,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	for (try = 0; try < 5; try++) {
 		/* Load the send data into the aux channel data registers */
 		for (i = 0; i < send_bytes; i += 4)
-			I915_WRITE(ch_data + i,
+			I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
 				   intel_dp_pack_aux(send + i,
 						     send_bytes - i));
 
@@ -920,13 +919,12 @@ done:
 	recv_bytes = recv_size;
 
 	for (i = 0; i < recv_bytes; i += 4)
-		intel_dp_unpack_aux(I915_READ(ch_data + i),
+		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
 				    recv + i, recv_bytes - i);
 
 	ret = recv_bytes;
 out:
 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-	intel_aux_display_runtime_put(dev_priv);
 
 	if (vdd)
 		edp_panel_vdd_off(intel_dp, false);
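The AUX write/read loops above step through the payload four bytes at a time, with `i >> 2` selecting the data register that holds bytes i..i+3. A self-contained sketch of the pack/unpack halves, mirroring what intel_dp_pack_aux()/intel_dp_unpack_aux() do (the helper names here are local to the demo, and the regs[] array stands in for the MMIO data registers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack up to 4 message bytes into one 32-bit register value, MSB first. */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t)src[i]) << ((3 - i) * 8);
	return v;
}

/* The inverse: unpack a register value back into up to 4 bytes. */
static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

int main(void)
{
	uint8_t msg[6] = { 0x10, 0x00, 0x02, 0x00, 0xaa, 0x55 };
	uint32_t regs[2];	/* stands in for aux_ch_data_reg[i >> 2] */
	uint8_t out[6];
	int i;

	for (i = 0; i < (int)sizeof(msg); i += 4)
		regs[i >> 2] = pack_aux(msg + i, (int)sizeof(msg) - i);
	for (i = 0; i < (int)sizeof(out); i += 4)
		unpack_aux(regs[i >> 2], out + i, (int)sizeof(out) - i);

	printf("round trip %s\n", memcmp(msg, out, sizeof(msg)) ? "failed" : "ok");
	return 0;
}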
@@ -1008,96 +1006,206 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return ret;
 }
 
-static void
-intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				  enum port port)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	enum port port = intel_dig_port->port;
-	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
-	const char *name = NULL;
-	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
-	int ret;
+	switch (port) {
+	case PORT_B:
+	case PORT_C:
+	case PORT_D:
+		return DP_AUX_CH_CTL(port);
+	default:
+		MISSING_CASE(port);
+		return DP_AUX_CH_CTL(PORT_B);
+	}
+}
 
-	/* On SKL we don't have Aux for port E so we rely on VBT to set
-	 * a proper alternate aux channel.
-	 */
-	if (IS_SKYLAKE(dev) && port == PORT_E) {
-		switch (info->alternate_aux_channel) {
-		case DP_AUX_B:
-			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
-			break;
-		case DP_AUX_C:
-			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
-			break;
-		case DP_AUX_D:
-			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
-			break;
-		case DP_AUX_A:
-		default:
-			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
-		}
+static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
+				   enum port port, int index)
+{
+	switch (port) {
+	case PORT_B:
+	case PORT_C:
+	case PORT_D:
+		return DP_AUX_CH_DATA(port, index);
+	default:
+		MISSING_CASE(port);
+		return DP_AUX_CH_DATA(PORT_B, index);
 	}
+}
 
+static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				  enum port port)
+{
 	switch (port) {
 	case PORT_A:
-		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
-		name = "DPDDC-A";
-		break;
+		return DP_AUX_CH_CTL(port);
 	case PORT_B:
-		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
-		name = "DPDDC-B";
-		break;
 	case PORT_C:
-		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
-		name = "DPDDC-C";
-		break;
 	case PORT_D:
-		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
-		name = "DPDDC-D";
-		break;
-	case PORT_E:
-		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
-		name = "DPDDC-E";
-		break;
+		return PCH_DP_AUX_CH_CTL(port);
 	default:
-		BUG();
+		MISSING_CASE(port);
+		return DP_AUX_CH_CTL(PORT_A);
 	}
+}
 
-	/*
-	 * The AUX_CTL register is usually DP_CTL + 0x10.
-	 *
-	 * On Haswell and Broadwell though:
-	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
-	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
-	 *
-	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
-	 */
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
-		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
+				   enum port port, int index)
+{
+	switch (port) {
+	case PORT_A:
+		return DP_AUX_CH_DATA(port, index);
+	case PORT_B:
+	case PORT_C:
+	case PORT_D:
+		return PCH_DP_AUX_CH_DATA(port, index);
+	default:
+		MISSING_CASE(port);
+		return DP_AUX_CH_DATA(PORT_A, index);
+	}
+}
+
+/*
+ * On SKL we don't have Aux for port E so we rely
+ * on VBT to set a proper alternate aux channel.
+ */
+static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[PORT_E];
+
+	switch (info->alternate_aux_channel) {
+	case DP_AUX_A:
+		return PORT_A;
+	case DP_AUX_B:
+		return PORT_B;
+	case DP_AUX_C:
+		return PORT_C;
+	case DP_AUX_D:
+		return PORT_D;
+	default:
+		MISSING_CASE(info->alternate_aux_channel);
+		return PORT_A;
+	}
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				  enum port port)
+{
+	if (port == PORT_E)
+		port = skl_porte_aux_port(dev_priv);
+
+	switch (port) {
+	case PORT_A:
+	case PORT_B:
+	case PORT_C:
+	case PORT_D:
+		return DP_AUX_CH_CTL(port);
+	default:
+		MISSING_CASE(port);
+		return DP_AUX_CH_CTL(PORT_A);
+	}
+}
+
+static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
+				   enum port port, int index)
+{
+	if (port == PORT_E)
+		port = skl_porte_aux_port(dev_priv);
+
+	switch (port) {
+	case PORT_A:
+	case PORT_B:
+	case PORT_C:
+	case PORT_D:
+		return DP_AUX_CH_DATA(port, index);
+	default:
+		MISSING_CASE(port);
+		return DP_AUX_CH_DATA(PORT_A, index);
+	}
+}
+
+static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				    enum port port)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return skl_aux_ctl_reg(dev_priv, port);
+	else if (HAS_PCH_SPLIT(dev_priv))
+		return ilk_aux_ctl_reg(dev_priv, port);
+	else
+		return g4x_aux_ctl_reg(dev_priv, port);
+}
+
+static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
+				     enum port port, int index)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return skl_aux_data_reg(dev_priv, port, index);
+	else if (HAS_PCH_SPLIT(dev_priv))
+		return ilk_aux_data_reg(dev_priv, port, index);
+	else
+		return g4x_aux_data_reg(dev_priv, port, index);
+}
+
+static void intel_aux_reg_init(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	int i;
+
+	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
+	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
+		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
+}
+
+static void
+intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+	drm_dp_aux_unregister(&intel_dp->aux);
+	kfree(intel_dp->aux.name);
+}
+
+static int
+intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
+	int ret;
+
+	intel_aux_reg_init(intel_dp);
+
+	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
+	if (!intel_dp->aux.name)
+		return -ENOMEM;
 
-	intel_dp->aux.name = name;
 	intel_dp->aux.dev = dev->dev;
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
 
-	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
+	DRM_DEBUG_KMS("registering %s bus for %s\n",
+		      intel_dp->aux.name,
 		      connector->base.kdev->kobj.name);
 
 	ret = drm_dp_aux_register(&intel_dp->aux);
 	if (ret < 0) {
 		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
-			  name, ret);
-		return;
+			  intel_dp->aux.name, ret);
+		kfree(intel_dp->aux.name);
+		return ret;
 	}
 
 	ret = sysfs_create_link(&connector->base.kdev->kobj,
 				&intel_dp->aux.ddc.dev.kobj,
 				intel_dp->aux.ddc.dev.kobj.name);
 	if (ret < 0) {
-		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
-		drm_dp_aux_unregister(&intel_dp->aux);
+		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
+			  intel_dp->aux.name, ret);
+		intel_dp_aux_fini(intel_dp);
+		return ret;
 	}
+
+	return 0;
 }
 
 static void
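The refactor above replaces the old open-coded switch in intel_dp_aux_init() with small per-platform lookup functions plus a one-time intel_aux_reg_init() that caches the control and data registers in the connector's struct. A reduced userspace sketch of the same dispatch-once pattern (types simplified, and the register offsets here are hypothetical, invented only for the demo):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

/* Stand-in for i915_reg_t: a wrapped MMIO offset. */
typedef struct { unsigned int reg; } reg_t;

/* Hypothetical offsets, for illustration only. */
static reg_t cpu_aux_ctl(enum port p) { return (reg_t){ 0x64010 + 0x100 * p }; }
static reg_t pch_aux_ctl(enum port p) { return (reg_t){ 0xe4010 + 0x100 * p }; }

/* Resolve the register once per connector instead of at every transfer. */
static reg_t aux_ctl_reg(int gen, enum port p)
{
	if (gen >= 9)
		return cpu_aux_ctl(p);	/* SKL+: all AUX channels on the CPU */
	if (p == PORT_A)
		return cpu_aux_ctl(p);	/* ILK..BDW: port A lives on the CPU */
	return pch_aux_ctl(p);		/* ILK..BDW: B/C/D live on the PCH */
}

struct dp {
	reg_t aux_ch_ctl_reg;		/* cached at init time */
};

static void aux_reg_init(struct dp *dp, int gen, enum port p)
{
	dp->aux_ch_ctl_reg = aux_ctl_reg(gen, p);
}

int main(void)
{
	struct dp dp;

	aux_reg_init(&dp, 8, PORT_C);
	printf("AUX ctl at 0x%x\n", dp.aux_ch_ctl_reg.reg);
	return 0;
}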
@@ -1189,10 +1297,13 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
-static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
 {
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+
 	/* WaDisableHBR2:skl */
-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
 		return false;
 
 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
@@ -1203,14 +1314,16 @@ static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
 }
 
 static int
-intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
 {
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
 	int size;
 
 	if (IS_BROXTON(dev)) {
 		*source_rates = bxt_rates;
 		size = ARRAY_SIZE(bxt_rates);
-	} else if (IS_SKYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		*source_rates = skl_rates;
 		size = ARRAY_SIZE(skl_rates);
 	} else {
@@ -1219,7 +1332,7 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 	}
 
 	/* This depends on the fact that 5.4 is last value in the array */
-	if (!intel_dp_source_supports_hbr2(dev))
+	if (!intel_dp_source_supports_hbr2(intel_dp))
 		size--;
 
 	return size;
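intel_dp_source_rates() relies on the rate arrays keeping 5.4 GHz (HBR2) as their last element, so masking out HBR2 is a single `size--` rather than a filtered copy. A small sketch of that convention (the array contents are illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Link rates in kHz, ascending; HBR2 (540000) must stay last. */
static const int skl_like_rates[] = { 162000, 216000, 270000, 324000, 432000, 540000 };

static int source_rates(int supports_hbr2, const int **rates)
{
	int size = ARRAY_SIZE(skl_like_rates);

	*rates = skl_like_rates;
	/* Dropping the last entry hides HBR2 without copying the array. */
	if (!supports_hbr2)
		size--;
	return size;
}

int main(void)
{
	const int *rates;
	int i, n = source_rates(0, &rates);

	for (i = 0; i < n; i++)
		printf("%d kHz\n", rates[i]);
	return 0;
}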
@@ -1284,12 +1397,11 @@ static int intersect_rates(const int *source_rates, int source_len,
 static int intel_dp_common_rates(struct intel_dp *intel_dp,
 				 int *common_rates)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	const int *source_rates, *sink_rates;
 	int source_len, sink_len;
 
 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
-	source_len = intel_dp_source_rates(dev, &source_rates);
+	source_len = intel_dp_source_rates(intel_dp, &source_rates);
 
 	return intersect_rates(source_rates, source_len,
 			       sink_rates, sink_len,
@@ -1314,7 +1426,6 @@ static void snprintf_int_array(char *str, size_t len,
 
 static void intel_dp_print_rates(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	const int *source_rates, *sink_rates;
 	int source_len, sink_len, common_len;
 	int common_rates[DP_MAX_SUPPORTED_RATES];
@@ -1323,7 +1434,7 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
 	if ((drm_debug & DRM_UT_KMS) == 0)
 		return;
 
-	source_len = intel_dp_source_rates(dev, &source_rates);
+	source_len = intel_dp_source_rates(intel_dp, &source_rates);
 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
 	DRM_DEBUG_KMS("source rates: %s\n", str);
 
@@ -1365,8 +1476,8 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
 	return rate_to_index(rate, intel_dp->sink_rates);
 }
 
-static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
-				  uint8_t *link_bw, uint8_t *rate_select)
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+			   uint8_t *link_bw, uint8_t *rate_select)
 {
 	if (intel_dp->num_sink_rates) {
 		*link_bw = 0;
@@ -1426,7 +1537,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			return ret;
 	}
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		intel_gmch_panel_fitting(intel_crtc, pipe_config,
 					 intel_connector->panel.fitting_mode);
 	else
@@ -1530,7 +1641,7 @@ found:
 				     &pipe_config->dp_m2_n2);
 	}
 
-	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
 		skl_edp_set_pll_config(pipe_config);
 	else if (IS_BROXTON(dev))
 		/* handled in ddi */;
@@ -1542,37 +1653,6 @@ found:
 	return true;
 }
 
-static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
-
-	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
-		      crtc->config->port_clock);
-	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
-	if (crtc->config->port_clock == 162000) {
-		/* For a long time we've carried around a ILK-DevA w/a for the
-		 * 160MHz clock. If we're really unlucky, it's still required.
-		 */
-		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
-		dpa_ctl |= DP_PLL_FREQ_160MHZ;
-		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-	} else {
-		dpa_ctl |= DP_PLL_FREQ_270MHZ;
-		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-	}
-
-	I915_WRITE(DP_A, dpa_ctl);
-
-	POSTING_READ(DP_A);
-	udelay(500);
-}
-
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
 			      const struct intel_crtc_state *pipe_config)
 {
@@ -1617,9 +1697,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
 
-	if (crtc->config->has_audio)
-		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
-
 	/* Split out the IBX/CPU vs CPT settings */
 
 	if (IS_GEN7(dev) && port == PORT_A) {
@@ -1680,7 +1757,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg, pp_ctrl_reg;
+	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1770,7 +1847,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum intel_display_power_domain power_domain;
 	u32 pp;
-	u32 pp_stat_reg, pp_ctrl_reg;
+	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 	bool need_to_disable = !intel_dp->want_panel_vdd;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1784,7 +1861,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (edp_have_panel_vdd(intel_dp))
 		return need_to_disable;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -1846,7 +1923,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	enum intel_display_power_domain power_domain;
 	u32 pp;
-	u32 pp_stat_reg, pp_ctrl_reg;
+	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1874,7 +1951,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	if ((pp & POWER_TARGET_ON) == 0)
 		intel_dp->last_power_cycle = jiffies;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -1933,7 +2010,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
-	u32 pp_ctrl_reg;
+	i915_reg_t pp_ctrl_reg;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1995,7 +2072,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum intel_display_power_domain power_domain;
 	u32 pp;
-	u32 pp_ctrl_reg;
+	i915_reg_t pp_ctrl_reg;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -2025,7 +2102,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	wait_panel_off(intel_dp);
 
 	/* We got a reference when we enabled the VDD. */
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -2046,7 +2123,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
-	u32 pp_ctrl_reg;
+	i915_reg_t pp_ctrl_reg;
 
 	/*
 	 * If we enable the backlight right away following a panel power
@@ -2087,7 +2164,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
-	u32 pp_ctrl_reg;
+	i915_reg_t pp_ctrl_reg;
 
 	if (!is_edp(intel_dp))
 		return;
@@ -2146,27 +2223,61 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
 		_intel_edp_backlight_off(intel_dp);
 }
 
+static const char *state_string(bool enabled)
+{
+	return enabled ? "on" : "off";
+}
+
+static void assert_dp_port(struct intel_dp *intel_dp, bool state)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
+
+	I915_STATE_WARN(cur_state != state,
+			"DP port %c state assertion failure (expected %s, current %s)\n",
+			port_name(dig_port->port),
+			state_string(state), state_string(cur_state));
+}
+#define assert_dp_port_disabled(d) assert_dp_port((d), false)
+
+static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
+{
+	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
+
+	I915_STATE_WARN(cur_state != state,
+			"eDP PLL state assertion failure (expected %s, current %s)\n",
+			state_string(state), state_string(cur_state));
+}
+#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
+#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
+
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
+	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	assert_pipe_disabled(dev_priv,
-			     to_intel_crtc(crtc)->pipe);
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_dp_port_disabled(intel_dp);
+	assert_edp_pll_disabled(dev_priv);
+
+	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
+		      crtc->config->port_clock);
+
+	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+
+	if (crtc->config->port_clock == 162000)
+		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+	else
+		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+
+	I915_WRITE(DP_A, intel_dp->DP);
+	POSTING_READ(DP_A);
+	udelay(500);
 
-	DRM_DEBUG_KMS("\n");
-	dpa_ctl = I915_READ(DP_A);
-	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
-	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
-
-	/* We don't adjust intel_dp->DP while tearing down the link, to
-	 * facilitate link retraining (e.g. after hotplug). Hence clear all
-	 * enable bits here to ensure that we don't enable too much. */
-	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
 	intel_dp->DP |= DP_PLL_ENABLE;
+
 	I915_WRITE(DP_A, intel_dp->DP);
 	POSTING_READ(DP_A);
 	udelay(200);
@@ -2175,24 +2286,18 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
+	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_dp_port_disabled(intel_dp);
+	assert_edp_pll_enabled(dev_priv);
 
-	assert_pipe_disabled(dev_priv,
-			     to_intel_crtc(crtc)->pipe);
+	DRM_DEBUG_KMS("disabling eDP PLL\n");
 
-	dpa_ctl = I915_READ(DP_A);
-	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
-	     "dp pll off, should be on\n");
-	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+	intel_dp->DP &= ~DP_PLL_ENABLE;
 
-	/* We can't rely on the value tracked for the DP register in
-	 * intel_dp->DP because link_down must not change that (otherwise link
-	 * re-training will fail. */
-	dpa_ctl &= ~DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
+	I915_WRITE(DP_A, intel_dp->DP);
 	POSTING_READ(DP_A);
 	udelay(200);
 }
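The new assert_dp_port()/assert_edp_pll() helpers turn the old ad-hoc WARN() calls into reusable expected-vs-current checks, which ironlake_edp_pll_on/off now run before touching DP_A. A userspace sketch of the pattern, with I915_STATE_WARN reduced to fprintf and the register bit invented for the demo:

#include <stdbool.h>
#include <stdio.h>

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Reduced stand-in for I915_STATE_WARN: complain only on a mismatch. */
static void assert_state(const char *what, bool cur_state, bool state)
{
	if (cur_state != state)
		fprintf(stderr, "%s state assertion failure (expected %s, current %s)\n",
			what, state_string(state), state_string(cur_state));
}

#define assert_pll_enabled(reg)  assert_state("eDP PLL", (reg) & 0x1, true)
#define assert_pll_disabled(reg) assert_state("eDP PLL", (reg) & 0x1, false)

int main(void)
{
	unsigned int dpa_ctl = 0x1;	/* pretend the PLL enable bit is set */

	assert_pll_disabled(dpa_ctl);	/* fires: we expected it off */
	assert_pll_enabled(dpa_ctl);	/* silent */
	return 0;
}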
@@ -2261,7 +2366,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 		}
 
 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
-			      intel_dp->output_reg);
+			      i915_mmio_reg_offset(intel_dp->output_reg));
 	} else if (IS_CHERRYVIEW(dev)) {
 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
 	} else {
@@ -2324,7 +2429,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	intel_dp_get_m_n(crtc, pipe_config);
 
 	if (port == PORT_A) {
-		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
 			pipe_config->port_clock = 162000;
 		else
 			pipe_config->port_clock = 270000;
@@ -2389,6 +2494,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder)
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
 	intel_dp_link_down(intel_dp);
+
+	/* Only ilk+ has port A */
 	if (port == PORT_A)
 		ironlake_edp_pll_off(intel_dp);
 }
@@ -2548,6 +2655,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
 	/* enable with pattern 1 (as per spec) */
 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
@@ -2563,6 +2672,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
 	 * fail when the power sequencer is freshly used for this port.
 	 */
 	intel_dp->DP |= DP_PORT_EN;
+	if (crtc->config->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
 	POSTING_READ(intel_dp->output_reg);
@@ -2575,6 +2686,8 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	enum pipe pipe = crtc->pipe;
 
 	if (WARN_ON(dp_reg & DP_PORT_EN))
 		return;
@@ -2584,12 +2697,35 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	if (IS_VALLEYVIEW(dev))
 		vlv_init_panel_power_sequencer(intel_dp);
 
+	/*
+	 * We get an occasional spurious underrun between the port
+	 * enable and vdd enable, when enabling port A eDP.
+	 *
+	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
+	 */
+	if (port == PORT_A)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	intel_dp_enable_port(intel_dp);
 
+	if (port == PORT_A && IS_GEN5(dev_priv)) {
+		/*
+		 * Underrun reporting for the other pipe was disabled in
+		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
+		 * enabled, so it's now safe to re-enable underrun reporting.
+		 */
+		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
+	}
+
 	edp_panel_vdd_on(intel_dp);
 	edp_panel_on(intel_dp);
 	edp_panel_vdd_off(intel_dp, true);
 
+	if (port == PORT_A)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	pps_unlock(intel_dp);
 
 	if (IS_VALLEYVIEW(dev)) {
@@ -2608,7 +2744,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 
 	if (crtc->config->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-				 pipe_name(crtc->pipe));
+				 pipe_name(pipe));
 		intel_audio_codec_enable(encoder);
 	}
 }
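The intel_enable_dp() changes bracket the port A enable with intel_set_cpu_fifo_underrun_reporting(..., false/true) so a known spurious underrun does not spam the log, re-arming the check only once the sequence that triggers it has finished. A compact sketch of that suppress-work-restore shape (all helpers here are demo stubs, not driver API):

#include <stdbool.h>
#include <stdio.h>

static bool underrun_reporting = true;

static void set_underrun_reporting(bool enable)
{
	underrun_reporting = enable;
	printf("underrun reporting %s\n", enable ? "enabled" : "disabled");
}

static void report_underrun(void)
{
	/* Without the suppression this would log a scary but harmless error. */
	if (underrun_reporting)
		fprintf(stderr, "FIFO underrun!\n");
}

static void enable_port(void)
{
	printf("port enabled\n");
	report_underrun();	/* hardware quirk: spurious underrun here */
}

int main(void)
{
	set_underrun_reporting(false);	/* sweep the known glitch under the rug */
	enable_port();
	set_underrun_reporting(true);	/* re-arm once the port is stable */
	return 0;
}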
@@ -2631,16 +2767,29 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
 
 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
 
 	intel_dp_prepare(encoder);
 
+	if (port == PORT_A && IS_GEN5(dev_priv)) {
+		/*
+		 * We get FIFO underruns on the other pipe when
+		 * enabling the CPU eDP PLL, and when enabling CPU
+		 * eDP port. We could potentially avoid the PLL
+		 * underrun with a vblank wait just prior to enabling
+		 * the PLL, but that doesn't appear to help the port
+		 * enable case. Just sweep it all under the rug.
+		 */
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
+	}
+
 	/* Only ilk+ has port A */
-	if (dport->port == PORT_A) {
-		ironlake_set_pll_cpu_edp(intel_dp);
+	if (port == PORT_A)
 		ironlake_edp_pll_on(intel_dp);
-	}
 }
 
@@ -2648,7 +2797,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
 	enum pipe pipe = intel_dp->pps_pipe;
-	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
 	edp_panel_vdd_off_sync(intel_dp);
 
@@ -3046,7 +3195,7 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
  * Fetch AUX CH registers 0x202 - 0x207 which contain
  * link status information
  */
-static bool
+bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
@@ -3056,7 +3205,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
 }
 
 /* These are source-specific values. */
-static uint8_t
+uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -3079,7 +3228,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 }
 
-static uint8_t
+uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -3421,38 +3570,6 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
 	return 0;
 }
 
-static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
-		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
-	uint8_t v = 0;
-	uint8_t p = 0;
-	int lane;
-	uint8_t voltage_max;
-	uint8_t preemph_max;
-
-	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
-		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
-
-		if (this_v > v)
-			v = this_v;
-		if (this_p > p)
-			p = this_p;
-	}
-
-	voltage_max = intel_dp_voltage_max(intel_dp);
-	if (v >= voltage_max)
-		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
-
-	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
-	if (p >= preemph_max)
-		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
-	for (lane = 0; lane < 4; lane++)
-		intel_dp->train_set[lane] = v | p;
-}
-
 static uint32_t
 gen4_signal_levels(uint8_t train_set)
 {
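The deleted intel_get_adjust_train() computed the next voltage-swing/pre-emphasis request by taking the maximum the sink asked for across all lanes and clamping it to the source's limits, flagging when the ceiling is hit. A standalone restatement of that computation (bit positions follow the DPCD lane-set layout, but the macro names are local to this demo):

#include <stdint.h>
#include <stdio.h>

#define MAX_SWING_REACHED	0x04	/* bit 2, as in DPCD lane-set bytes */
#define PREEMPH_SHIFT		3	/* pre-emphasis level at bits 3-4 */
#define MAX_PREEMPH_REACHED	0x20	/* bit 5 */

/* Clamp per-lane sink requests to one setting shared by all lanes. */
static uint8_t compute_train_set(const uint8_t *req_v, const uint8_t *req_p,
				 int lanes, uint8_t v_max, uint8_t p_max)
{
	uint8_t v = 0, p = 0, out;
	int lane;

	/* Take the largest per-lane request so every lane can follow it. */
	for (lane = 0; lane < lanes; lane++) {
		if (req_v[lane] > v)
			v = req_v[lane];
		if (req_p[lane] > p)
			p = req_p[lane];
	}

	/* Cap at what the source supports. */
	if (v >= v_max)
		v = v_max;
	if (p >= p_max)
		p = p_max;

	out = v | (p << PREEMPH_SHIFT);
	if (v == v_max)
		out |= MAX_SWING_REACHED;	/* tell the sink we can go no higher */
	if (p == p_max)
		out |= MAX_PREEMPH_REACHED;
	return out;
}

int main(void)
{
	uint8_t req_v[4] = { 1, 3, 2, 1 };	/* lane 1 wants the most swing */
	uint8_t req_p[4] = { 0, 1, 1, 0 };

	printf("train_set = 0x%x\n", compute_train_set(req_v, req_p, 4, 2, 2));
	return 0;
}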
@@ -3550,13 +3667,13 @@ gen7_edp_signal_levels(uint8_t train_set)
 	}
 }
 
-/* Properly updates "DP" with the correct signal levels. */
-static void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t signal_levels, mask = 0;
 	uint8_t train_set = intel_dp->train_set[0];
 
@@ -3591,74 +3708,27 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
 		DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
-	*DP = (*DP & ~mask) | signal_levels;
-}
+	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
 
-static bool
-intel_dp_set_link_train(struct intel_dp *intel_dp,
-			uint32_t *DP,
-			uint8_t dp_train_pat)
-{
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv =
-		to_i915(intel_dig_port->base.base.dev);
-	uint8_t buf[sizeof(intel_dp->train_set) + 1];
-	int ret, len;
-
-	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
-
-	I915_WRITE(intel_dp->output_reg, *DP);
+	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
 	POSTING_READ(intel_dp->output_reg);
-
-	buf[0] = dp_train_pat;
-	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
-	    DP_TRAINING_PATTERN_DISABLE) {
-		/* don't write DP_TRAINING_LANEx_SET on disable */
-		len = 1;
-	} else {
-		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
-		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
-		len = intel_dp->lane_count + 1;
-	}
-
-	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
-				buf, len);
-
-	return ret == len;
-}
-
-static bool
-intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
-			uint8_t dp_train_pat)
-{
-	if (!intel_dp->train_set_valid)
-		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
-	intel_dp_set_signal_levels(intel_dp, DP);
-	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
 }
 
-static bool
-intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
-			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+				       uint8_t dp_train_pat)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv =
 		to_i915(intel_dig_port->base.base.dev);
-	int ret;
 
-	intel_get_adjust_train(intel_dp, link_status);
-	intel_dp_set_signal_levels(intel_dp, DP);
+	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
 
-	I915_WRITE(intel_dp->output_reg, *DP);
+	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
 	POSTING_READ(intel_dp->output_reg);
-
-	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
-				intel_dp->train_set, intel_dp->lane_count);
-
-	return ret == intel_dp->lane_count;
 }
 
-static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -3689,232 +3759,6 @@ static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
 }
 
-/* Enable corresponding port and start training pattern 1 */
-static void
-intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
-{
-	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
-	struct drm_device *dev = encoder->dev;
-	int i;
-	uint8_t voltage;
-	int voltage_tries, loop_tries;
-	uint32_t DP = intel_dp->DP;
-	uint8_t link_config[2];
-	uint8_t link_bw, rate_select;
-
-	if (HAS_DDI(dev))
-		intel_ddi_prepare_link_retrain(encoder);
-
-	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
-			      &link_bw, &rate_select);
-
-	/* Write the link configuration data */
-	link_config[0] = link_bw;
-	link_config[1] = intel_dp->lane_count;
-	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
-		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
-	if (intel_dp->num_sink_rates)
-		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
-				  &rate_select, 1);
-
-	link_config[0] = 0;
-	link_config[1] = DP_SET_ANSI_8B10B;
-	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
-
-	DP |= DP_PORT_EN;
-
-	/* clock recovery */
-	if (!intel_dp_reset_link_train(intel_dp, &DP,
-				       DP_TRAINING_PATTERN_1 |
-				       DP_LINK_SCRAMBLING_DISABLE)) {
-		DRM_ERROR("failed to enable link training\n");
-		return;
-	}
-
-	voltage = 0xff;
-	voltage_tries = 0;
-	loop_tries = 0;
-	for (;;) {
-		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
-		if (!intel_dp_get_link_status(intel_dp, link_status)) {
-			DRM_ERROR("failed to get link status\n");
-			break;
-		}
-
-		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
-			DRM_DEBUG_KMS("clock recovery OK\n");
-			break;
-		}
-
-		/*
-		 * if we used previously trained voltage and pre-emphasis values
-		 * and we don't get clock recovery, reset link training values
-		 */
-		if (intel_dp->train_set_valid) {
-			DRM_DEBUG_KMS("clock recovery not ok, reset");
-			/* clear the flag as we are not reusing train set */
-			intel_dp->train_set_valid = false;
-			if (!intel_dp_reset_link_train(intel_dp, &DP,
-						       DP_TRAINING_PATTERN_1 |
-						       DP_LINK_SCRAMBLING_DISABLE)) {
-				DRM_ERROR("failed to enable link training\n");
-				return;
-			}
-			continue;
-		}
-
-		/* Check to see if we've tried the max voltage */
-		for (i = 0; i < intel_dp->lane_count; i++)
-			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-				break;
-		if (i == intel_dp->lane_count) {
-			++loop_tries;
-			if (loop_tries == 5) {
-				DRM_ERROR("too many full retries, give up\n");
-				break;
-			}
-			intel_dp_reset_link_train(intel_dp, &DP,
-						  DP_TRAINING_PATTERN_1 |
-						  DP_LINK_SCRAMBLING_DISABLE);
-			voltage_tries = 0;
-			continue;
-		}
-
-		/* Check to see if we've tried the same voltage 5 times */
-		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-			++voltage_tries;
-			if (voltage_tries == 5) {
-				DRM_ERROR("too many voltage retries, give up\n");
-				break;
-			}
-		} else
-			voltage_tries = 0;
-		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
-
-		/* Update training set as requested by target */
-		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
-			DRM_ERROR("failed to update link training\n");
-			break;
-		}
-	}
-
-	intel_dp->DP = DP;
-}
-
-static void
-intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	bool channel_eq = false;
-	int tries, cr_tries;
-	uint32_t DP = intel_dp->DP;
-	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
-
-	/*
-	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
-	 *
-	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
-	 * also mandatory for downstream devices that support HBR2.
-	 *
-	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
-	 * supported but still not enabled.
-	 */
-	if (intel_dp_source_supports_hbr2(dev) &&
-	    drm_dp_tps3_supported(intel_dp->dpcd))
-		training_pattern = DP_TRAINING_PATTERN_3;
-	else if (intel_dp->link_rate == 540000)
-		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
-
-	/* channel equalization */
-	if (!intel_dp_set_link_train(intel_dp, &DP,
-				     training_pattern |
-				     DP_LINK_SCRAMBLING_DISABLE)) {
-		DRM_ERROR("failed to start channel equalization\n");
-		return;
-	}
-
-	tries = 0;
-	cr_tries = 0;
-	channel_eq = false;
-	for (;;) {
-		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-		if (cr_tries > 5) {
-			DRM_ERROR("failed to train DP, aborting\n");
-			break;
-		}
-
-		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
-		if (!intel_dp_get_link_status(intel_dp, link_status)) {
-			DRM_ERROR("failed to get link status\n");
-			break;
-		}
-
-		/* Make sure clock is still ok */
-		if (!drm_dp_clock_recovery_ok(link_status,
-					      intel_dp->lane_count)) {
-			intel_dp->train_set_valid = false;
-			intel_dp_link_training_clock_recovery(intel_dp);
-			intel_dp_set_link_train(intel_dp, &DP,
-						training_pattern |
-						DP_LINK_SCRAMBLING_DISABLE);
-			cr_tries++;
-			continue;
-		}
-
-		if (drm_dp_channel_eq_ok(link_status,
-					 intel_dp->lane_count)) {
-			channel_eq = true;
-			break;
-		}
-
-		/* Try 5 times, then try clock recovery if that fails */
-		if (tries > 5) {
-			intel_dp->train_set_valid = false;
-			intel_dp_link_training_clock_recovery(intel_dp);
-			intel_dp_set_link_train(intel_dp, &DP,
-						training_pattern |
-						DP_LINK_SCRAMBLING_DISABLE);
-			tries = 0;
-			cr_tries++;
-			continue;
-		}
-
-		/* Update training set as requested by target */
-		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
-			DRM_ERROR("failed to update link training\n");
-			break;
-		}
-		++tries;
-	}
-
-	intel_dp_set_idle_link_train(intel_dp);
-
-	intel_dp->DP = DP;
-
-	if (channel_eq) {
-		intel_dp->train_set_valid = true;
-		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-	}
-}
-
-void intel_dp_stop_link_train(struct intel_dp *intel_dp)
-{
-	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
-				DP_TRAINING_PATTERN_DISABLE);
-}
-
-void
-intel_dp_start_link_train(struct intel_dp *intel_dp)
-{
-	intel_dp_link_training_clock_recovery(intel_dp);
-	intel_dp_link_training_channel_equalization(intel_dp);
-}
-
 static void
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
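The block deleted above is the whole clock-recovery/channel-EQ state machine, removed from this file as the training helpers around it are made non-static. Its core control flow: retry with sink-requested adjustments, bail after five tries at the same voltage or five full resets. A miniature of that retry policy (all the poll/adjust functions are stand-ins, not driver API):

#include <stdbool.h>
#include <stdio.h>

static int attempt;

/* Stand-in for polling DPCD link status. */
static bool clock_recovery_ok(void)
{
	return ++attempt >= 3;	/* pretend the sink locks on the 3rd try */
}

static bool max_swing_reached(void) { return false; }

int main(void)
{
	int voltage_tries = 0, loop_tries = 0;
	int voltage = -1, new_voltage = 0;

	for (;;) {
		if (clock_recovery_ok()) {
			printf("clock recovery OK\n");
			break;
		}
		/* Exhausted the swing range: reset and start over (max 5x). */
		if (max_swing_reached()) {
			if (++loop_tries == 5) {
				fprintf(stderr, "too many full retries, give up\n");
				break;
			}
			voltage_tries = 0;
			continue;
		}
		/* Same voltage requested again: count it (max 5x). */
		if (new_voltage == voltage) {
			if (++voltage_tries == 5) {
				fprintf(stderr, "too many voltage retries, give up\n");
				break;
			}
		} else {
			voltage_tries = 0;
		}
		voltage = new_voltage;
		new_voltage++;	/* pretend the sink asks for more swing */
	}
	return 0;
}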
@@ -3957,6 +3801,13 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	 * matching HDMI port to be enabled on transcoder A.
 	 */
 	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+		/*
+		 * We get CPU/PCH FIFO underruns on the other pipe when
+		 * doing the workaround. Sweep them under the rug.
+		 */
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
 		/* always enable with pattern 1 (as per spec) */
 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
@@ -3966,9 +3817,15 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 		DP &= ~DP_PORT_EN;
 		I915_WRITE(intel_dp->output_reg, DP);
 		POSTING_READ(intel_dp->output_reg);
+
+		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
 
 	msleep(intel_dp->panel_power_down_delay);
+
+	intel_dp->DP = DP;
 }
 
 static bool
@@ -4016,7 +3873,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	}
 
 	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
-		      yesno(intel_dp_source_supports_hbr2(dev)),
+		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
 		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
 
 	/* Intermediate frequency support */
@@ -4106,9 +3963,12 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
 	int ret = 0;
+	int count = 0;
+	int attempts = 10;
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
@@ -4123,7 +3983,22 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 		goto out;
 	}
 
-	intel_dp->sink_crc.started = false;
+	do {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+		if (drm_dp_dpcd_readb(&intel_dp->aux,
+				      DP_TEST_SINK_MISC, &buf) < 0) {
+			ret = -EIO;
+			goto out;
+		}
+		count = buf & DP_TEST_COUNT_MASK;
+	} while (--attempts && count);
+
+	if (attempts == 0) {
+		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
+		ret = -ETIMEDOUT;
+	}
+
 out:
 	hsw_enable_ips(intel_crtc);
 	return ret;
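The new stop path polls TEST_SINK_MISC once per vblank until the sink's CRC count drains, giving up after ten attempts; the `--attempts && count` condition keeps looping only while both hold, so `attempts == 0` afterwards unambiguously means timeout. A reduced sketch of the idiom, with the vblank wait and register read replaced by stubs:

#include <stdio.h>

static int hw_count = 3;	/* pretend 3 CRC results are still queued */

static void wait_for_vblank(void) { hw_count--; /* one frame elapses */ }

static int read_count(void) { return hw_count > 0 ? hw_count : 0; }

int main(void)
{
	int attempts = 10;
	int count;

	do {
		wait_for_vblank();
		count = read_count();
	} while (--attempts && count);

	if (attempts == 0) {
		fprintf(stderr, "TIMEOUT: counter is not zeroed\n");
		return 1;
	}
	printf("counter drained with %d attempts left\n", attempts);
	return 0;
}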
@@ -4132,27 +4007,26 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4132static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) 4007static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4133{ 4008{
4134 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4009 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4010 struct drm_device *dev = dig_port->base.base.dev;
4135 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 4011 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4136 u8 buf; 4012 u8 buf;
4137 int ret; 4013 int ret;
4138 4014
4139 if (intel_dp->sink_crc.started) {
4140 ret = intel_dp_sink_crc_stop(intel_dp);
4141 if (ret)
4142 return ret;
4143 }
4144
4145 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) 4015 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4146 return -EIO; 4016 return -EIO;
4147 4017
4148 if (!(buf & DP_TEST_CRC_SUPPORTED)) 4018 if (!(buf & DP_TEST_CRC_SUPPORTED))
4149 return -ENOTTY; 4019 return -ENOTTY;
4150 4020
4151 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4152
4153 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) 4021 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4154 return -EIO; 4022 return -EIO;
4155 4023
4024 if (buf & DP_TEST_SINK_START) {
4025 ret = intel_dp_sink_crc_stop(intel_dp);
4026 if (ret)
4027 return ret;
4028 }
4029
4156 hsw_disable_ips(intel_crtc); 4030 hsw_disable_ips(intel_crtc);
4157 4031
4158 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 4032 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
@@ -4161,7 +4035,7 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4161 return -EIO; 4035 return -EIO;
4162 } 4036 }
4163 4037
4164 intel_dp->sink_crc.started = true; 4038 intel_wait_for_vblank(dev, intel_crtc->pipe);
4165 return 0; 4039 return 0;
4166} 4040}
4167 4041
@@ -4173,7 +4047,6 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4173 u8 buf; 4047 u8 buf;
4174 int count, ret; 4048 int count, ret;
4175 int attempts = 6; 4049 int attempts = 6;
4176 bool old_equal_new;
4177 4050
4178 ret = intel_dp_sink_crc_start(intel_dp); 4051 ret = intel_dp_sink_crc_start(intel_dp);
4179 if (ret) 4052 if (ret)
@@ -4189,35 +4062,17 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4189 } 4062 }
4190 count = buf & DP_TEST_COUNT_MASK; 4063 count = buf & DP_TEST_COUNT_MASK;
4191 4064
4192 /* 4065 } while (--attempts && count == 0);
4193 * Count might be reset during the loop. In this case
4194 * last known count needs to be reset as well.
4195 */
4196 if (count == 0)
4197 intel_dp->sink_crc.last_count = 0;
4198
4199 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4200 ret = -EIO;
4201 goto stop;
4202 }
4203
4204 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4205 !memcmp(intel_dp->sink_crc.last_crc, crc,
4206 6 * sizeof(u8)));
4207
4208 } while (--attempts && (count == 0 || old_equal_new));
4209
4210 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4211 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4212 4066
4213 if (attempts == 0) { 4067 if (attempts == 0) {
4214 if (old_equal_new) { 4068 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4215 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n"); 4069 ret = -ETIMEDOUT;
4216 } else { 4070 goto stop;
4217 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); 4071 }
4218 ret = -ETIMEDOUT; 4072
4219 goto stop; 4073 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4220 } 4074 ret = -EIO;
4075 goto stop;
4221 } 4076 }
4222 4077
4223stop: 4078stop:
@@ -4317,13 +4172,6 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
 	uint8_t rxdata = 0;
 	int status = 0;
 
-	intel_dp->compliance_test_active = 0;
-	intel_dp->compliance_test_type = 0;
-	intel_dp->compliance_test_data = 0;
-
-	intel_dp->aux.i2c_nack_count = 0;
-	intel_dp->aux.i2c_defer_count = 0;
-
 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
 	if (status <= 0) {
 		DRM_DEBUG_KMS("Could not read test request from sink\n");
@@ -4439,6 +4287,14 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
+	/*
+	 * Clear the compliance test variables so that fresh values can be
+	 * captured for the next automated test request.
+	 */
+	intel_dp->compliance_test_active = 0;
+	intel_dp->compliance_test_type = 0;
+	intel_dp->compliance_test_data = 0;
+
 	if (!intel_encoder->base.crtc)
 		return;
 
@@ -4469,7 +4325,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 		DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
 	}
 
-	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+	/* If link training is requested, always perform it. */
+	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
 			      intel_encoder->base.name);
 		intel_dp_start_link_train(intel_dp);
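
The hunk above widens the retrain condition: training now also runs when the sink has posted a DP_TEST_LINK_TRAINING compliance request, not only when channel equalization has degraded. A minimal sketch of that predicate with stand-in types (the #define mirrors the DPCD TEST_REQUEST bit):

#include <stdbool.h>

#define DP_TEST_LINK_TRAINING 0x01	/* TEST_REQUEST bit 0, per the DP spec */

struct dp_state {
	int compliance_test_type;	/* last requested automated test */
	bool channel_eq_ok;		/* result of drm_dp_channel_eq_ok() */
};

/* Retrain for a pending link-training test, or when EQ has been lost. */
static bool needs_retrain(const struct dp_state *dp)
{
	return dp->compliance_test_type == DP_TEST_LINK_TRAINING ||
	       !dp->channel_eq_ok;
}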
@@ -4687,41 +4545,6 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
 	return g4x_digital_port_connected(dev_priv, port);
 }
 
-static enum drm_connector_status
-ironlake_dp_detect(struct intel_dp *intel_dp)
-{
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
-	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
-		return connector_status_disconnected;
-
-	return intel_dp_detect_dpcd(intel_dp);
-}
-
-static enum drm_connector_status
-g4x_dp_detect(struct intel_dp *intel_dp)
-{
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
-	/* Can't disconnect eDP, but you can close the lid... */
-	if (is_edp(intel_dp)) {
-		enum drm_connector_status status;
-
-		status = intel_panel_detect(dev);
-		if (status == connector_status_unknown)
-			status = connector_status_connected;
-		return status;
-	}
-
-	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
-		return connector_status_disconnected;
-
-	return intel_dp_detect_dpcd(intel_dp);
-}
-
 static struct edid *
 intel_dp_get_edid(struct intel_dp *intel_dp)
 {
@@ -4765,26 +4588,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 	intel_dp->has_audio = false;
 }
 
-static enum intel_display_power_domain
-intel_dp_power_get(struct intel_dp *dp)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-	enum intel_display_power_domain power_domain;
-
-	power_domain = intel_display_port_power_domain(encoder);
-	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
-
-	return power_domain;
-}
-
-static void
-intel_dp_power_put(struct intel_dp *dp,
-		   enum intel_display_power_domain power_domain)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
-}
-
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
@@ -4808,17 +4611,25 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		return connector_status_disconnected;
 	}
 
-	power_domain = intel_dp_power_get(intel_dp);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(to_i915(dev), power_domain);
 
 	/* Can't disconnect eDP, but you can close the lid... */
 	if (is_edp(intel_dp))
 		status = edp_detect(intel_dp);
-	else if (HAS_PCH_SPLIT(dev))
-		status = ironlake_dp_detect(intel_dp);
+	else if (intel_digital_port_connected(to_i915(dev),
+					      dp_to_dig_port(intel_dp)))
+		status = intel_dp_detect_dpcd(intel_dp);
 	else
-		status = g4x_dp_detect(intel_dp);
-	if (status != connector_status_connected)
+		status = connector_status_disconnected;
+
+	if (status != connector_status_connected) {
+		intel_dp->compliance_test_active = 0;
+		intel_dp->compliance_test_type = 0;
+		intel_dp->compliance_test_data = 0;
+
 		goto out;
+	}
 
 	intel_dp_probe_oui(intel_dp);
 
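
With ironlake_dp_detect() and g4x_dp_detect() removed, detection collapses into the single flow visible above: eDP is always reachable, any other live digital port is probed over DPCD, and everything else reports disconnected. A rough, self-contained model of that decision under those assumptions:

enum status { DISCONNECTED, CONNECTED };

struct port_state {
	int is_edp;	/* eDP panels cannot be unplugged */
	int hpd_live;	/* intel_digital_port_connected() result */
	int dpcd_ok;	/* DPCD probe succeeded */
};

static enum status dp_detect(const struct port_state *p)
{
	if (p->is_edp)
		return CONNECTED;
	if (p->hpd_live)
		return p->dpcd_ok ? CONNECTED : DISCONNECTED;
	return DISCONNECTED;
}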
@@ -4832,6 +4643,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		goto out;
 	}
 
+	/*
+	 * Clear the NACK and defer counts so that exact values can be read
+	 * back while fetching the EDID, as required by DP compliance tests
+	 * 4.2.2.4 and 4.2.2.5.
+	 */
+	intel_dp->aux.i2c_nack_count = 0;
+	intel_dp->aux.i2c_defer_count = 0;
+
 	intel_dp_set_edid(intel_dp);
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
@@ -4853,7 +4672,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	}
 
 out:
-	intel_dp_power_put(intel_dp, power_domain);
+	intel_display_power_put(to_i915(dev), power_domain);
 	return status;
 }
 
@@ -4862,6 +4681,7 @@ intel_dp_force(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	enum intel_display_power_domain power_domain;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -4871,11 +4691,12 @@ intel_dp_force(struct drm_connector *connector)
 	if (connector->status != connector_status_connected)
 		return;
 
-	power_domain = intel_dp_power_get(intel_dp);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
 
 	intel_dp_set_edid(intel_dp);
 
-	intel_dp_power_put(intel_dp, power_domain);
+	intel_display_power_put(dev_priv, power_domain);
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
@@ -5034,7 +4855,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
-	drm_dp_aux_unregister(&intel_dp->aux);
+	intel_dp_aux_fini(intel_dp);
 	intel_dp_mst_encoder_cleanup(intel_dig_port);
 	if (is_edp(intel_dp)) {
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -5091,7 +4912,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	 * indefinitely.
 	 */
 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
 	intel_display_power_get(dev_priv, power_domain);
 
 	edp_panel_vdd_schedule_off(intel_dp);
@@ -5153,7 +4974,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 	enum intel_display_power_domain power_domain;
 	enum irqreturn ret = IRQ_NONE;
 
-	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5172,7 +4994,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 		      port_name(intel_dig_port->port),
 		      long_hpd ? "long" : "short");
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	if (long_hpd) {
@@ -5223,25 +5045,6 @@ put_power:
 	return ret;
 }
 
-/* Return which DP Port should be selected for Transcoder DP control */
-int
-intel_trans_dp_port_sel(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *intel_encoder;
-	struct intel_dp *intel_dp;
-
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-		intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
-		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-		    intel_encoder->type == INTEL_OUTPUT_EDP)
-			return intel_dp->output_reg;
-	}
-
-	return -1;
-}
-
 /* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
@@ -5313,7 +5116,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	struct edp_power_seq cur, vbt, spec,
 		*final = &intel_dp->pps_delays;
 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
-	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
+	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5435,7 +5238,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
-	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
+	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
@@ -5597,7 +5400,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 			DRM_ERROR("Unsupported refresh rate type\n");
 		}
 	} else if (INTEL_INFO(dev)->gen > 6) {
-		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
 		u32 val;
 
 		val = I915_READ(reg);
@@ -6015,7 +5818,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
-	int type;
+	int type, ret;
 
 	intel_dp->pps_pipe = INVALID_PIPE;
 
@@ -6036,6 +5839,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	else
 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
+	if (HAS_DDI(dev))
+		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
+
 	/* Preserve the current hw state. */
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
@@ -6087,7 +5893,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		break;
 	case PORT_B:
 		intel_encoder->hpd_pin = HPD_PORT_B;
-		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 			intel_encoder->hpd_pin = HPD_PORT_A;
 		break;
 	case PORT_C:
@@ -6113,7 +5919,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		pps_unlock(intel_dp);
 	}
 
-	intel_dp_aux_init(intel_dp, intel_connector);
+	ret = intel_dp_aux_init(intel_dp, intel_connector);
+	if (ret)
+		goto fail;
 
 	/* init MST on ports that can support it */
 	if (HAS_DP_MST(dev) &&
@@ -6122,20 +5930,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		      intel_connector->base.base.id);
 
 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
-		drm_dp_aux_unregister(&intel_dp->aux);
-		if (is_edp(intel_dp)) {
-			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-			/*
-			 * vdd might still be enabled due to the delayed vdd off.
-			 * Make sure vdd is actually turned off here.
-			 */
-			pps_lock(intel_dp);
-			edp_panel_vdd_off_sync(intel_dp);
-			pps_unlock(intel_dp);
-		}
-		drm_connector_unregister(connector);
-		drm_connector_cleanup(connector);
-		return false;
+		intel_dp_aux_fini(intel_dp);
+		intel_dp_mst_encoder_cleanup(intel_dig_port);
+		goto fail;
 	}
 
 	intel_dp_add_properties(intel_dp, connector);
@@ -6152,10 +5949,27 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	i915_debugfs_connector_add(connector);
 
 	return true;
+
+fail:
+	if (is_edp(intel_dp)) {
+		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+		/*
+		 * vdd might still be enabled due to the delayed vdd off.
+		 * Make sure vdd is actually turned off here.
+		 */
+		pps_lock(intel_dp);
+		edp_panel_vdd_off_sync(intel_dp);
+		pps_unlock(intel_dp);
+	}
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+
+	return false;
 }
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init(struct drm_device *dev,
+	      i915_reg_t output_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port;
@@ -6175,7 +5989,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	encoder = &intel_encoder->base;
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);
 
 	intel_encoder->compute_config = intel_dp_compute_config;
 	intel_encoder->disable = intel_disable_dp;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
new file mode 100644
index 000000000000..88887938e0bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+static void
+intel_get_adjust_train(struct intel_dp *intel_dp,
+		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+	uint8_t voltage_max;
+	uint8_t preemph_max;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	voltage_max = intel_dp_voltage_max(intel_dp);
+	if (v >= voltage_max)
+		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+	if (p >= preemph_max)
+		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+			uint8_t dp_train_pat)
+{
+	uint8_t buf[sizeof(intel_dp->train_set) + 1];
+	int ret, len;
+
+	intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
+
+	buf[0] = dp_train_pat;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
+	    DP_TRAINING_PATTERN_DISABLE) {
+		/* don't write DP_TRAINING_LANEx_SET on disable */
+		len = 1;
+	} else {
+		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
+		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+		len = intel_dp->lane_count + 1;
+	}
+
+	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+				buf, len);
+
+	return ret == len;
+}
+
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp,
+			  uint8_t dp_train_pat)
+{
+	if (!intel_dp->train_set_valid)
+		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+	intel_dp_set_signal_levels(intel_dp);
+	return intel_dp_set_link_train(intel_dp, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp)
+{
+	int ret;
+
+	intel_dp_set_signal_levels(intel_dp);
+
+	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+				intel_dp->train_set, intel_dp->lane_count);
+
+	return ret == intel_dp->lane_count;
+}
+
+/* Enable the corresponding port and start training pattern 1 */
+static void
+intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
+{
+	int i;
+	uint8_t voltage;
+	int voltage_tries, loop_tries;
+	uint8_t link_config[2];
+	uint8_t link_bw, rate_select;
+
+	if (intel_dp->prepare_link_retrain)
+		intel_dp->prepare_link_retrain(intel_dp);
+
+	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
+			      &link_bw, &rate_select);
+
+	/* Write the link configuration data */
+	link_config[0] = link_bw;
+	link_config[1] = intel_dp->lane_count;
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+	if (intel_dp->num_sink_rates)
+		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+				  &rate_select, 1);
+
+	link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+
+	intel_dp->DP |= DP_PORT_EN;
+
+	/* clock recovery */
+	if (!intel_dp_reset_link_train(intel_dp,
+				       DP_TRAINING_PATTERN_1 |
+				       DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to enable link training\n");
+		return;
+	}
+
+	voltage = 0xff;
+	voltage_tries = 0;
+	loop_tries = 0;
+	for (;;) {
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("clock recovery OK\n");
+			break;
+		}
+
+		/*
+		 * If we used previously trained voltage and pre-emphasis
+		 * values and we don't get clock recovery, reset the link
+		 * training values.
+		 */
+		if (intel_dp->train_set_valid) {
+			DRM_DEBUG_KMS("clock recovery not ok, reset");
+			/* clear the flag as we are not reusing train set */
+			intel_dp->train_set_valid = false;
+			if (!intel_dp_reset_link_train(intel_dp,
+						       DP_TRAINING_PATTERN_1 |
+						       DP_LINK_SCRAMBLING_DISABLE)) {
+				DRM_ERROR("failed to enable link training\n");
+				return;
+			}
+			continue;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count) {
+			++loop_tries;
+			if (loop_tries == 5) {
+				DRM_ERROR("too many full retries, give up\n");
+				break;
+			}
+			intel_dp_reset_link_train(intel_dp,
+						  DP_TRAINING_PATTERN_1 |
+						  DP_LINK_SCRAMBLING_DISABLE);
+			voltage_tries = 0;
+			continue;
+		}
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++voltage_tries;
+			if (voltage_tries == 5) {
+				DRM_ERROR("too many voltage retries, give up\n");
+				break;
+			}
+		} else
+			voltage_tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Update training set as requested by target */
+		intel_get_adjust_train(intel_dp, link_status);
+		if (!intel_dp_update_link_train(intel_dp)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
+	}
+}
+
+static void
+intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+{
+	bool channel_eq = false;
+	int tries, cr_tries;
+	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+
+	/*
+	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
+	 *
+	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
+	 * also mandatory for downstream devices that support HBR2.
+	 *
+	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
+	 * supported but still not enabled.
+	 */
+	if (intel_dp_source_supports_hbr2(intel_dp) &&
+	    drm_dp_tps3_supported(intel_dp->dpcd))
+		training_pattern = DP_TRAINING_PATTERN_3;
+	else if (intel_dp->link_rate == 540000)
+		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+
+	/* channel equalization */
+	if (!intel_dp_set_link_train(intel_dp,
+				     training_pattern |
+				     DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to start channel equalization\n");
+		return;
+	}
+
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+	for (;;) {
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			break;
+		}
+
+		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
+			break;
+		}
+
+		/* Make sure clock is still ok */
+		if (!drm_dp_clock_recovery_ok(link_status,
+					      intel_dp->lane_count)) {
+			intel_dp->train_set_valid = false;
+			intel_dp_link_training_clock_recovery(intel_dp);
+			intel_dp_set_link_train(intel_dp,
+						training_pattern |
+						DP_LINK_SCRAMBLING_DISABLE);
+			cr_tries++;
+			continue;
+		}
+
+		if (drm_dp_channel_eq_ok(link_status,
+					 intel_dp->lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp->train_set_valid = false;
+			intel_dp_link_training_clock_recovery(intel_dp);
+			intel_dp_set_link_train(intel_dp,
+						training_pattern |
+						DP_LINK_SCRAMBLING_DISABLE);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Update training set as requested by target */
+		intel_get_adjust_train(intel_dp, link_status);
+		if (!intel_dp_update_link_train(intel_dp)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
+		++tries;
+	}
+
+	intel_dp_set_idle_link_train(intel_dp);
+
+	if (channel_eq) {
+		intel_dp->train_set_valid = true;
+		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+	}
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+	intel_dp_set_link_train(intel_dp,
+				DP_TRAINING_PATTERN_DISABLE);
+}
+
+void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+	intel_dp_link_training_clock_recovery(intel_dp);
+	intel_dp_link_training_channel_equalization(intel_dp);
+}
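
The new file keeps DP training as two phases behind two entry points: intel_dp_start_link_train() runs clock recovery and then channel equalization, and intel_dp_stop_link_train() clears the training pattern afterwards. The clock-recovery loop gives up after five tries at one voltage swing or five full loops at maximum swing; below is a self-contained model of just that give-up policy, with fabricated adjust requests standing in for the sink's feedback:

#include <stdio.h>

int main(void)
{
	int voltage = -1, voltage_tries = 0, loop_tries = 0;
	int step;

	for (step = 0; step < 100; step++) {
		/* fake adjust request: cycles 0,1,2, then asks for max swing */
		int requested = step / 3 % 4;

		if (requested == 3) {		/* max swing reached */
			if (++loop_tries == 5) {
				puts("too many full retries, give up");
				break;
			}
			voltage_tries = 0;
			continue;
		}
		if (requested == voltage) {
			if (++voltage_tries == 5) {
				puts("too many voltage retries, give up");
				break;
			}
		} else {
			voltage_tries = 0;
		}
		voltage = requested;
	}
	return 0;
}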
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 0639275fc471..e8d369d0a713 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
 	intel_mst->port = found->port;
 
 	if (intel_dp->active_mst_links == 0) {
-		enum port port = intel_ddi_get_encoder_port(encoder);
+		intel_ddi_clk_select(encoder, intel_crtc->config);
 
 		intel_dp_set_link_params(intel_dp, intel_crtc->config);
 
-		/* FIXME: add support for SKL */
-		if (INTEL_INFO(dev)->gen < 9)
-			I915_WRITE(PORT_CLK_SEL(port),
-				   intel_crtc->config->ddi_pll_sel);
-
 		intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 
-
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_stop_link_train(intel_dp);
 	}
@@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
 {
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+
+	if (dev_priv->fbdev)
+		drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
+						&connector->base);
 #endif
 }
 
@@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
 {
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+
+	if (dev_priv->fbdev)
+		drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
+						   &connector->base);
 #endif
 }
 
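
Both hunks above put a NULL check in front of the fb helper calls, since fbdev emulation can be compiled in (CONFIG_DRM_FBDEV_EMULATION) and yet never have allocated dev_priv->fbdev. A self-contained sketch of the same guard pattern with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct helper { int connectors; };
struct fbdev { struct helper helper; };
struct priv { struct fbdev *fbdev; };

static void add_connector(struct priv *p)
{
	if (p->fbdev)	/* fbdev may not have been set up at all */
		p->fbdev->helper.connectors++;
}

int main(void)
{
	struct priv no_fbdev = { NULL };

	add_connector(&no_fbdev);	/* safe no-op instead of a NULL deref */
	puts("ok");
	return 0;
}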
@@ -536,7 +536,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 	intel_mst->primary = intel_dig_port;
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
-			 DRM_MODE_ENCODER_DPMST);
+			 DRM_MODE_ENCODER_DPMST, NULL);
 
 	intel_encoder->type = INTEL_OUTPUT_DP_MST;
 	intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0598932ce623..50f83d220249 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -123,8 +123,6 @@ struct intel_framebuffer {
 struct intel_fbdev {
 	struct drm_fb_helper helper;
 	struct intel_framebuffer *fb;
-	struct list_head fbdev_list;
-	struct drm_display_mode *our_mode;
 	int preferred_bpp;
 };
 
@@ -250,6 +248,7 @@ struct intel_atomic_state {
 	unsigned int cdclk;
 	bool dpll_set;
 	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+	struct intel_wm_config wm_config;
 };
 
 struct intel_plane_state {
@@ -280,6 +279,9 @@ struct intel_plane_state {
 	int scaler_id;
 
 	struct drm_intel_sprite_colorkey ckey;
+
+	/* async flip related structures */
+	struct drm_i915_gem_request *wait_req;
 };
 
 struct intel_initial_plane_config {
@@ -334,6 +336,21 @@ struct intel_crtc_scaler_state {
 /* drm_mode->private_flags */
 #define I915_MODE_FLAG_INHERITED 1
 
+struct intel_pipe_wm {
+	struct intel_wm_level wm[5];
+	uint32_t linetime;
+	bool fbc_wm_enabled;
+	bool pipe_enabled;
+	bool sprites_enabled;
+	bool sprites_scaled;
+};
+
+struct skl_pipe_wm {
+	struct skl_wm_level wm[8];
+	struct skl_wm_level trans_wm;
+	uint32_t linetime;
+};
+
 struct intel_crtc_state {
 	struct drm_crtc_state base;
 
@@ -376,6 +393,9 @@ struct intel_crtc_state {
 	 * accordingly. */
 	bool has_dp_encoder;
 
+	/* DSI has special cases */
+	bool has_dsi_encoder;
+
 	/* Whether we should send NULL infoframes. Required for audio. */
 	bool has_hdmi_sink;
 
@@ -468,6 +488,20 @@ struct intel_crtc_state {
 
 	/* w/a for waiting 2 vblanks during crtc enable */
 	enum pipe hsw_workaround_pipe;
+
+	/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
+	bool disable_lp_wm;
+
+	struct {
+		/*
+		 * optimal watermarks, programmed post-vblank when this state
+		 * is committed
+		 */
+		union {
+			struct intel_pipe_wm ilk;
+			struct skl_pipe_wm skl;
+		} optimal;
+	} wm;
 };
 
 struct vlv_wm_state {
@@ -479,26 +513,12 @@ struct vlv_wm_state {
 	bool cxsr;
 };
 
-struct intel_pipe_wm {
-	struct intel_wm_level wm[5];
-	uint32_t linetime;
-	bool fbc_wm_enabled;
-	bool pipe_enabled;
-	bool sprites_enabled;
-	bool sprites_scaled;
-};
-
 struct intel_mmio_flip {
 	struct work_struct work;
 	struct drm_i915_private *i915;
 	struct drm_i915_gem_request *req;
 	struct intel_crtc *crtc;
-};
-
-struct skl_pipe_wm {
-	struct skl_wm_level wm[8];
-	struct skl_wm_level trans_wm;
-	uint32_t linetime;
+	unsigned int rotation;
 };
 
 /*
@@ -509,13 +529,11 @@ struct skl_pipe_wm {
 */
struct intel_crtc_atomic_commit {
 	/* Sleepable operations to perform before commit */
-	bool wait_for_flips;
 	bool disable_fbc;
 	bool disable_ips;
 	bool disable_cxsr;
 	bool pre_disable_primary;
 	bool update_wm_pre, update_wm_post;
-	unsigned disabled_planes;
 
 	/* Sleepable operations to perform after commit */
 	unsigned fb_bits;
@@ -568,9 +586,10 @@ struct intel_crtc {
 	/* per-pipe watermark state */
 	struct {
 		/* watermarks currently being used */
-		struct intel_pipe_wm active;
-		/* SKL wm values currently in use */
-		struct skl_pipe_wm skl_active;
+		union {
+			struct intel_pipe_wm ilk;
+			struct skl_pipe_wm skl;
+		} active;
 		/* allow CxSR on this pipe */
 		bool cxsr_allowed;
 	} wm;
@@ -678,7 +697,7 @@ struct cxsr_latency {
 #define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
 
 struct intel_hdmi {
-	u32 hdmi_reg;
+	i915_reg_t hdmi_reg;
 	int ddc_bus;
 	bool limited_color_range;
 	bool color_range_auto;
@@ -694,7 +713,8 @@ struct intel_hdmi {
 	void (*set_infoframes)(struct drm_encoder *encoder,
 			       bool enable,
 			       const struct drm_display_mode *adjusted_mode);
-	bool (*infoframe_enabled)(struct drm_encoder *encoder);
+	bool (*infoframe_enabled)(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config);
 };
 
 struct intel_dp_mst_encoder;
@@ -720,15 +740,10 @@ enum link_m_n_set {
 	M2_N2
 };
 
-struct sink_crc {
-	bool started;
-	u8 last_crc[6];
-	int last_count;
-};
-
 struct intel_dp {
-	uint32_t output_reg;
-	uint32_t aux_ch_ctl_reg;
+	i915_reg_t output_reg;
+	i915_reg_t aux_ch_ctl_reg;
+	i915_reg_t aux_ch_data_reg[5];
 	uint32_t DP;
 	int link_rate;
 	uint8_t lane_count;
@@ -742,7 +757,6 @@ struct intel_dp {
 	/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
 	uint8_t num_sink_rates;
 	int sink_rates[DP_MAX_SUPPORTED_RATES];
-	struct sink_crc sink_crc;
 	struct drm_dp_aux aux;
 	uint8_t train_set[4];
 	int panel_power_up_delay;
@@ -784,6 +798,10 @@ struct intel_dp {
 				     bool has_aux_irq,
 				     int send_bytes,
 				     uint32_t aux_clock_divider);
+
+	/* This hook is called before link training is started. */
+	void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+
 	bool train_set_valid;
 
 	/* Displayport compliance testing */
@@ -943,7 +961,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 					 enum pipe pipe);
 void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 					 enum transcoder pch_transcoder);
-void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
@@ -974,6 +993,8 @@ void intel_crt_init(struct drm_device *dev);
 
 
 /* intel_ddi.c */
+void intel_ddi_clk_select(struct intel_encoder *encoder,
+			  const struct intel_crtc_state *pipe_config);
 void intel_prepare_ddi(struct drm_device *dev);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -988,7 +1009,7 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
 bool intel_ddi_pll_select(struct intel_crtc *crtc,
 			  struct intel_crtc_state *crtc_state);
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1056,6 +1077,15 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
 	drm_wait_one_vblank(dev, pipe);
 }
+static inline void
+intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+{
+	const struct intel_crtc *crtc =
+		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+
+	if (crtc->active)
+		intel_wait_for_vblank(dev, pipe);
+}
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 			 struct intel_digital_port *dport,
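
intel_wait_for_vblank_if_active() above encodes a small but useful rule: never block waiting for a vblank on a pipe that is not running, because the event will never arrive. A stand-alone sketch of the idiom with stand-in types:

#include <stdbool.h>

struct crtc { bool active; };

static void wait_one_vblank(void)
{
	/* would sleep until the next vertical blank interrupt */
}

static void wait_for_vblank_if_active(const struct crtc *crtc)
{
	if (crtc->active)	/* an inactive pipe never ticks */
		wait_one_vblank();
}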
@@ -1069,9 +1099,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct drm_modeset_acquire_ctx *ctx);
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			       struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state,
-			       struct intel_engine_cs *pipelined,
-			       struct drm_i915_gem_request **pipelined_request);
+			       const struct drm_plane_state *plane_state);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1152,7 +1180,10 @@ void broxton_ddi_phy_uninit(struct drm_device *dev);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void skl_init_cdclk(struct drm_i915_private *dev_priv);
+int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void skl_disable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
 		      struct intel_crtc_state *pipe_config);
 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@ -1169,33 +1200,30 @@ void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
-void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
 			   struct drm_i915_gem_object *obj,
 			   unsigned int plane);
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
 u32 skl_plane_ctl_rotation(unsigned int rotation);
 
 /* intel_csr.c */
-void intel_csr_ucode_init(struct drm_device *dev);
-enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
-void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
-			       enum csr_state state);
-void intel_csr_load_program(struct drm_device *dev);
-void intel_csr_ucode_fini(struct drm_device *dev);
-void assert_csr_loaded(struct drm_i915_private *dev_priv);
+void intel_csr_ucode_init(struct drm_i915_private *);
+void intel_csr_load_program(struct drm_i915_private *);
+void intel_csr_ucode_fini(struct drm_i915_private *);
 
 /* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1233,6 +1261,22 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
 				  struct intel_digital_port *port);
 void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+				       uint8_t dp_train_pat);
+void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp);
+void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
+uint8_t
+intel_dp_voltage_max(struct intel_dp *intel_dp);
+uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+			   uint8_t *link_bw, uint8_t *rate_select);
+bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1247,7 +1291,7 @@ void intel_dvo_init(struct drm_device *dev);
 /* legacy fbdev emulation in intel_fbdev.c */
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
+extern void intel_fbdev_initial_config_async(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
 extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
 extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
@@ -1258,7 +1302,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
 	return 0;
 }
 
-static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
+static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
 }
 
@@ -1276,9 +1320,11 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
-void intel_fbc_update(struct drm_i915_private *dev_priv);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_deactivate(struct intel_crtc *crtc);
+void intel_fbc_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc);
 void intel_fbc_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1286,11 +1332,10 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  enum fb_op_origin origin);
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
 		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
-const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
-void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1366,8 +1411,13 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_fini(struct drm_i915_private *);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
+void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
+void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
 
 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 				    enum intel_display_power_domain domain);
@@ -1377,8 +1427,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
@@ -1396,12 +1444,6 @@ void intel_init_clock_gating(struct drm_device *dev);
 void intel_suspend_hw(struct drm_device *dev);
 int ilk_wm_max_level(const struct drm_device *dev);
 void intel_update_watermarks(struct drm_crtc *crtc);
-void intel_update_sprite_watermarks(struct drm_plane *plane,
-				    struct drm_crtc *crtc,
-				    uint32_t sprite_width,
-				    uint32_t sprite_height,
-				    int pixel_size,
-				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
 void intel_pm_setup(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1429,7 +1471,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
 /* intel_sdvo.c */
-bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+bool intel_sdvo_init(struct drm_device *dev,
+		     i915_reg_t reg, enum port port);
 
 
 /* intel_sprite.c */
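
Many hunks in this header swap bare u32 register offsets for i915_reg_t. A self-contained sketch of why the one-member wrapper struct helps: it makes register offsets a distinct type, so a raw integer can no longer be passed where a register is expected (the offset below is made up for illustration):

#include <stdint.h>

typedef struct { uint32_t reg; } i915_reg_t;

#define _MMIO(r) ((i915_reg_t){ .reg = (r) })

static uint32_t mmio_offset(i915_reg_t r)
{
	return r.reg;
}

int main(void)
{
	i915_reg_t pp_ctrl = _MMIO(0x61204);	/* hypothetical offset */

	/* mmio_offset(0x61204) would now fail to compile */
	return mmio_offset(pp_ctrl) != 0x61204;
}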
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 170ae6f4866e..fff9a66c32a1 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 		DRM_ERROR("DPI FIFOs are not empty\n");
 }
 
-static void write_data(struct drm_i915_private *dev_priv, u32 reg,
+static void write_data(struct drm_i915_private *dev_priv,
+		       i915_reg_t reg,
 		       const u8 *data, u32 len)
 {
 	u32 i, j;
@@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg,
 	}
 }
 
-static void read_data(struct drm_i915_private *dev_priv, u32 reg,
+static void read_data(struct drm_i915_private *dev_priv,
+		      i915_reg_t reg,
 		      u8 *data, u32 len)
 {
 	u32 i, j;
@@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 	struct mipi_dsi_packet packet;
 	ssize_t ret;
 	const u8 *header, *data;
-	u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
+	i915_reg_t data_reg, ctrl_reg;
+	u32 data_mask, ctrl_mask;
 
 	ret = mipi_dsi_create_packet(&packet, msg);
 	if (ret < 0)
@@ -263,16 +266,18 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
263} 266}
264 267
265static bool intel_dsi_compute_config(struct intel_encoder *encoder, 268static bool intel_dsi_compute_config(struct intel_encoder *encoder,
266 struct intel_crtc_state *config) 269 struct intel_crtc_state *pipe_config)
267{ 270{
268 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, 271 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
269 base); 272 base);
270 struct intel_connector *intel_connector = intel_dsi->attached_connector; 273 struct intel_connector *intel_connector = intel_dsi->attached_connector;
271 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 274 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
272 struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode; 275 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
273 276
274 DRM_DEBUG_KMS("\n"); 277 DRM_DEBUG_KMS("\n");
275 278
279 pipe_config->has_dsi_encoder = true;
280
276 if (fixed_mode) 281 if (fixed_mode)
277 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 282 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
278 283
@@ -377,10 +382,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
377 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 382 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
378 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 383 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
379 enum port port; 384 enum port port;
380 u32 temp;
381 u32 port_ctrl;
382 385
383 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 386 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
387 u32 temp;
388
384 temp = I915_READ(VLV_CHICKEN_3); 389 temp = I915_READ(VLV_CHICKEN_3);
385 temp &= ~PIXEL_OVERLAP_CNT_MASK | 390 temp &= ~PIXEL_OVERLAP_CNT_MASK |
386 intel_dsi->pixel_overlap << 391 intel_dsi->pixel_overlap <<
@@ -389,8 +394,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
389 } 394 }
390 395
391 for_each_dsi_port(port, intel_dsi->ports) { 396 for_each_dsi_port(port, intel_dsi->ports) {
392 port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : 397 i915_reg_t port_ctrl = IS_BROXTON(dev) ?
393 MIPI_PORT_CTRL(port); 398 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
399 u32 temp;
394 400
395 temp = I915_READ(port_ctrl); 401 temp = I915_READ(port_ctrl);
396 402
@@ -416,13 +422,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
416 struct drm_i915_private *dev_priv = dev->dev_private; 422 struct drm_i915_private *dev_priv = dev->dev_private;
417 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 423 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
418 enum port port; 424 enum port port;
419 u32 temp;
420 u32 port_ctrl;
421 425
422 for_each_dsi_port(port, intel_dsi->ports) { 426 for_each_dsi_port(port, intel_dsi->ports) {
427 i915_reg_t port_ctrl = IS_BROXTON(dev) ?
428 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
429 u32 temp;
430
423 /* de-assert ip_tg_enable signal */ 431 /* de-assert ip_tg_enable signal */
424 port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
425 MIPI_PORT_CTRL(port);
426 temp = I915_READ(port_ctrl); 432 temp = I915_READ(port_ctrl);
427 I915_WRITE(port_ctrl, temp & ~DPI_ENABLE); 433 I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
428 POSTING_READ(port_ctrl); 434 POSTING_READ(port_ctrl);
@@ -458,6 +464,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
458 intel_panel_enable_backlight(intel_dsi->attached_connector); 464 intel_panel_enable_backlight(intel_dsi->attached_connector);
459} 465}
460 466
467static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
468
461static void intel_dsi_pre_enable(struct intel_encoder *encoder) 469static void intel_dsi_pre_enable(struct intel_encoder *encoder)
462{ 470{
463 struct drm_device *dev = encoder->base.dev; 471 struct drm_device *dev = encoder->base.dev;
@@ -470,6 +478,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
470 478
471 DRM_DEBUG_KMS("\n"); 479 DRM_DEBUG_KMS("\n");
472 480
481 intel_dsi_prepare(encoder);
482 intel_enable_dsi_pll(encoder);
483
473 /* Panel Enable over CRC PMIC */ 484 /* Panel Enable over CRC PMIC */
474 if (intel_dsi->gpio_panel) 485 if (intel_dsi->gpio_panel)
475 gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); 486 gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
@@ -580,11 +591,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
580 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 591 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
581 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 592 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
582 enum port port; 593 enum port port;
583 u32 val;
584 u32 port_ctrl = 0;
585 594
586 DRM_DEBUG_KMS("\n"); 595 DRM_DEBUG_KMS("\n");
587 for_each_dsi_port(port, intel_dsi->ports) { 596 for_each_dsi_port(port, intel_dsi->ports) {
597 /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
598 i915_reg_t port_ctrl = IS_BROXTON(dev) ?
599 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
600 u32 val;
588 601
589 I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | 602 I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
590 ULPS_STATE_ENTER); 603 ULPS_STATE_ENTER);
@@ -598,12 +611,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
598 ULPS_STATE_ENTER); 611 ULPS_STATE_ENTER);
599 usleep_range(2000, 2500); 612 usleep_range(2000, 2500);
600 613
601 if (IS_BROXTON(dev))
602 port_ctrl = BXT_MIPI_PORT_CTRL(port);
603 else if (IS_VALLEYVIEW(dev))
604 /* Common bit for both MIPI Port A & MIPI Port C */
605 port_ctrl = MIPI_PORT_CTRL(PORT_A);
606
607 /* Wait till Clock lanes are in LP-00 state for MIPI Port A 614 /* Wait till Clock lanes are in LP-00 state for MIPI Port A
608 * only. MIPI Port C has no similar bit for checking 615 * only. MIPI Port C has no similar bit for checking
609 */ 616 */
@@ -656,7 +663,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
656 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 663 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
657 struct drm_device *dev = encoder->base.dev; 664 struct drm_device *dev = encoder->base.dev;
658 enum intel_display_power_domain power_domain; 665 enum intel_display_power_domain power_domain;
659 u32 dpi_enabled, func, ctrl_reg;
660 enum port port; 666 enum port port;
661 667
662 DRM_DEBUG_KMS("\n"); 668 DRM_DEBUG_KMS("\n");
@@ -667,9 +673,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
667 673
668 /* XXX: this only works for one DSI output */ 674 /* XXX: this only works for one DSI output */
669 for_each_dsi_port(port, intel_dsi->ports) { 675 for_each_dsi_port(port, intel_dsi->ports) {
676 i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
677 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
678 u32 dpi_enabled, func;
679
670 func = I915_READ(MIPI_DSI_FUNC_PRG(port)); 680 func = I915_READ(MIPI_DSI_FUNC_PRG(port));
671 ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
672 MIPI_PORT_CTRL(port);
673 dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE; 681 dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
674 682
675 /* Due to some hardware limitations on BYT, MIPI Port C DPI 683 /* Due to some hardware limitations on BYT, MIPI Port C DPI
@@ -698,6 +706,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
698 u32 pclk = 0; 706 u32 pclk = 0;
699 DRM_DEBUG_KMS("\n"); 707 DRM_DEBUG_KMS("\n");
700 708
709 pipe_config->has_dsi_encoder = true;
710
701 /* 711 /*
702 * DPLL_MD is not used in case of DSI, reading will get some default value 712 * DPLL_MD is not used in case of DSI, reading will get some default value
703 * set dpll_md = 0 713 * set dpll_md = 0
@@ -1025,15 +1035,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
1025 } 1035 }
1026} 1036}
1027 1037
1028static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
1029{
1030 DRM_DEBUG_KMS("\n");
1031
1032 intel_dsi_prepare(encoder);
1033 intel_enable_dsi_pll(encoder);
1034
1035}
1036
1037static enum drm_connector_status 1038static enum drm_connector_status
1038intel_dsi_detect(struct drm_connector *connector, bool force) 1039intel_dsi_detect(struct drm_connector *connector, bool force)
1039{ 1040{
@@ -1151,11 +1152,10 @@ void intel_dsi_init(struct drm_device *dev)
1151 1152
1152 connector = &intel_connector->base; 1153 connector = &intel_connector->base;
1153 1154
1154 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); 1155 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
1156 NULL);
1155 1157
1156 /* XXX: very likely not all of these are needed */
1157 intel_encoder->compute_config = intel_dsi_compute_config; 1158 intel_encoder->compute_config = intel_dsi_compute_config;
1158 intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
1159 intel_encoder->pre_enable = intel_dsi_pre_enable; 1159 intel_encoder->pre_enable = intel_dsi_pre_enable;
1160 intel_encoder->enable = intel_dsi_enable_nop; 1160 intel_encoder->enable = intel_dsi_enable_nop;
1161 intel_encoder->disable = intel_dsi_pre_disable; 1161 intel_encoder->disable = intel_dsi_pre_disable;
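
Editor's note: intel_dsi_compute_config() and intel_dsi_get_config() both gain pipe_config->has_dsi_encoder = true because i915's modeset state checker compares the software-computed pipe config against the config read back from hardware, so a flag filled in on only one side would trip the checker. A hypothetical standalone model of that cross-check (names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct pipe_config {
	bool has_dsi_encoder;
};

/* Compare one field of the computed state against the hardware
 * readout; the real checker does this for every tracked field. */
static bool pipe_config_compare(const struct pipe_config *sw,
				const struct pipe_config *hw)
{
	if (sw->has_dsi_encoder != hw->has_dsi_encoder) {
		fprintf(stderr, "mismatch in has_dsi_encoder (expected %d, found %d)\n",
			sw->has_dsi_encoder, hw->has_dsi_encoder);
		return false;
	}
	return true;
}

int main(void)
{
	struct pipe_config sw = { .has_dsi_encoder = true };
	struct pipe_config hw = { .has_dsi_encoder = true };

	return pipe_config_compare(&sw, &hw) ? 0 : 1;
}
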
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index e6cb25239941..02551ff228c2 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -117,7 +117,7 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
 
 #define for_each_dsi_port(__port, __ports_mask) \
 	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
-		if ((__ports_mask) & (1 << (__port)))
+		for_each_if ((__ports_mask) & (1 << (__port)))
 
 static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 {
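
Editor's note: the for_each_if() change fixes a macro-hygiene hazard. When an iterator macro ends in a bare if (...), an else written after the macro's body binds to that hidden if, and compilers may also warn about misleading indentation. The drm helper inverts the condition so there is no dangling branch; a self-contained demonstration (for_each_set_bit_u8 is a made-up example iterator, and the for_each_if definition is reproduced from memory):

#include <stdio.h>

/* Inverted form: the else branch is the loop body, so a user's else
 * can no longer attach to the macro's internal conditional. */
#define for_each_if(condition) if (!(condition)) {} else

#define for_each_set_bit_u8(bit, mask) \
	for ((bit) = 0; (bit) < 8; (bit)++) \
		for_each_if((mask) & (1 << (bit)))

int main(void)
{
	unsigned int bit;

	for_each_set_bit_u8(bit, 0x29) /* bits 0, 3 and 5 */
		printf("bit %u is set\n", bit);

	return 0;
}
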
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 8492053e0ff0..286baec979c8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -44,6 +44,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
44 .type = INTEL_DVO_CHIP_TMDS, 44 .type = INTEL_DVO_CHIP_TMDS,
45 .name = "sil164", 45 .name = "sil164",
46 .dvo_reg = DVOC, 46 .dvo_reg = DVOC,
47 .dvo_srcdim_reg = DVOC_SRCDIM,
47 .slave_addr = SIL164_ADDR, 48 .slave_addr = SIL164_ADDR,
48 .dev_ops = &sil164_ops, 49 .dev_ops = &sil164_ops,
49 }, 50 },
@@ -51,6 +52,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
51 .type = INTEL_DVO_CHIP_TMDS, 52 .type = INTEL_DVO_CHIP_TMDS,
52 .name = "ch7xxx", 53 .name = "ch7xxx",
53 .dvo_reg = DVOC, 54 .dvo_reg = DVOC,
55 .dvo_srcdim_reg = DVOC_SRCDIM,
54 .slave_addr = CH7xxx_ADDR, 56 .slave_addr = CH7xxx_ADDR,
55 .dev_ops = &ch7xxx_ops, 57 .dev_ops = &ch7xxx_ops,
56 }, 58 },
@@ -58,6 +60,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
58 .type = INTEL_DVO_CHIP_TMDS, 60 .type = INTEL_DVO_CHIP_TMDS,
59 .name = "ch7xxx", 61 .name = "ch7xxx",
60 .dvo_reg = DVOC, 62 .dvo_reg = DVOC,
63 .dvo_srcdim_reg = DVOC_SRCDIM,
61 .slave_addr = 0x75, /* For some ch7010 */ 64 .slave_addr = 0x75, /* For some ch7010 */
62 .dev_ops = &ch7xxx_ops, 65 .dev_ops = &ch7xxx_ops,
63 }, 66 },
@@ -65,6 +68,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
65 .type = INTEL_DVO_CHIP_LVDS, 68 .type = INTEL_DVO_CHIP_LVDS,
66 .name = "ivch", 69 .name = "ivch",
67 .dvo_reg = DVOA, 70 .dvo_reg = DVOA,
71 .dvo_srcdim_reg = DVOA_SRCDIM,
68 .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ 72 .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
69 .dev_ops = &ivch_ops, 73 .dev_ops = &ivch_ops,
70 }, 74 },
@@ -72,6 +76,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
72 .type = INTEL_DVO_CHIP_TMDS, 76 .type = INTEL_DVO_CHIP_TMDS,
73 .name = "tfp410", 77 .name = "tfp410",
74 .dvo_reg = DVOC, 78 .dvo_reg = DVOC,
79 .dvo_srcdim_reg = DVOC_SRCDIM,
75 .slave_addr = TFP410_ADDR, 80 .slave_addr = TFP410_ADDR,
76 .dev_ops = &tfp410_ops, 81 .dev_ops = &tfp410_ops,
77 }, 82 },
@@ -79,6 +84,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
79 .type = INTEL_DVO_CHIP_LVDS, 84 .type = INTEL_DVO_CHIP_LVDS,
80 .name = "ch7017", 85 .name = "ch7017",
81 .dvo_reg = DVOC, 86 .dvo_reg = DVOC,
87 .dvo_srcdim_reg = DVOC_SRCDIM,
82 .slave_addr = 0x75, 88 .slave_addr = 0x75,
83 .gpio = GMBUS_PIN_DPB, 89 .gpio = GMBUS_PIN_DPB,
84 .dev_ops = &ch7017_ops, 90 .dev_ops = &ch7017_ops,
@@ -87,6 +93,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
87 .type = INTEL_DVO_CHIP_TMDS, 93 .type = INTEL_DVO_CHIP_TMDS,
88 .name = "ns2501", 94 .name = "ns2501",
89 .dvo_reg = DVOB, 95 .dvo_reg = DVOB,
96 .dvo_srcdim_reg = DVOB_SRCDIM,
90 .slave_addr = NS2501_ADDR, 97 .slave_addr = NS2501_ADDR,
91 .dev_ops = &ns2501_ops, 98 .dev_ops = &ns2501_ops,
92 } 99 }
@@ -171,7 +178,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
171{ 178{
172 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 179 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
173 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 180 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
174 u32 dvo_reg = intel_dvo->dev.dvo_reg; 181 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
175 u32 temp = I915_READ(dvo_reg); 182 u32 temp = I915_READ(dvo_reg);
176 183
177 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 184 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -184,7 +191,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
184 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 191 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
185 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
186 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 193 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
187 u32 dvo_reg = intel_dvo->dev.dvo_reg; 194 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
188 u32 temp = I915_READ(dvo_reg); 195 u32 temp = I915_READ(dvo_reg);
189 196
190 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, 197 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
@@ -255,20 +262,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
255 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 262 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
256 int pipe = crtc->pipe; 263 int pipe = crtc->pipe;
257 u32 dvo_val; 264 u32 dvo_val;
258 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; 265 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
259 266 i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
260 switch (dvo_reg) {
261 case DVOA:
262 default:
263 dvo_srcdim_reg = DVOA_SRCDIM;
264 break;
265 case DVOB:
266 dvo_srcdim_reg = DVOB_SRCDIM;
267 break;
268 case DVOC:
269 dvo_srcdim_reg = DVOC_SRCDIM;
270 break;
271 }
272 267
273 /* Save the data order, since I don't know what it should be set to. */ 268 /* Save the data order, since I don't know what it should be set to. */
274 dvo_val = I915_READ(dvo_reg) & 269 dvo_val = I915_READ(dvo_reg) &
@@ -434,7 +429,7 @@ void intel_dvo_init(struct drm_device *dev)
434 429
435 intel_encoder = &intel_dvo->base; 430 intel_encoder = &intel_dvo->base;
436 drm_encoder_init(dev, &intel_encoder->base, 431 drm_encoder_init(dev, &intel_encoder->base,
437 &intel_dvo_enc_funcs, encoder_type); 432 &intel_dvo_enc_funcs, encoder_type, NULL);
438 433
439 intel_encoder->disable = intel_disable_dvo; 434 intel_encoder->disable = intel_disable_dvo;
440 intel_encoder->enable = intel_enable_dvo; 435 intel_encoder->enable = intel_enable_dvo;
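
Editor's note: the dvo_srcdim_reg additions trade a runtime switch (dvo_reg) in intel_dvo_pre_enable() for one more field in the static device table: each entry now carries its companion SRCDIM register, so the pairing is declared once where the device is described and cannot drift when entries are added. A standalone sketch of the idea (register offsets are placeholders, not the real DVO values):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dvo_device {
	const char *name;
	uint32_t dvo_reg;        /* control register */
	uint32_t dvo_srcdim_reg; /* paired source-dimension register */
};

/* The companion register lives next to the primary one, so lookup
 * is a struct read instead of a switch over register offsets. */
static const struct dvo_device devices[] = {
	{ .name = "sil164", .dvo_reg = 0x100, .dvo_srcdim_reg = 0x104 },
	{ .name = "ivch",   .dvo_reg = 0x200, .dvo_srcdim_reg = 0x204 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
		printf("%s: ctl=%#x srcdim=%#x\n", devices[i].name,
		       (unsigned)devices[i].dvo_reg,
		       (unsigned)devices[i].dvo_srcdim_reg);
	return 0;
}
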
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index cf47352b7b8e..a1988a486b92 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -43,7 +43,17 @@
 
 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.enable_fbc != NULL;
+	return dev_priv->fbc.activate != NULL;
+}
+
+static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
+{
+	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+}
+
+static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen < 4;
 }
 
 /*
@@ -59,11 +69,51 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 	return crtc->base.y - crtc->adjusted_y;
 }
 
-static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+					    int *width, int *height)
+{
+	struct intel_plane_state *plane_state =
+			to_intel_plane_state(crtc->base.primary->state);
+	int w, h;
+
+	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
+		w = drm_rect_height(&plane_state->src) >> 16;
+		h = drm_rect_width(&plane_state->src) >> 16;
+	} else {
+		w = drm_rect_width(&plane_state->src) >> 16;
+		h = drm_rect_height(&plane_state->src) >> 16;
+	}
+
+	if (width)
+		*width = w;
+	if (height)
+		*height = h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
+					struct drm_framebuffer *fb)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	int lines;
+
+	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		lines = min(lines, 2048);
+
+	/* Hardware needs the full buffer stride, not just the active area. */
+	return lines * fb->pitches[0];
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 fbc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);
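
Editor's note: intel_fbc_calculate_cfb_size() above makes the worst-case assumption that compression saves nothing, so the compressed framebuffer must cover every line at the full stride, with the line count clamped to 2048 on gen7+ hardware. A standalone model of that arithmetic (the values in main() are illustrative):

#include <stdio.h>

/* Worst case: compression saves nothing, so reserve lines * stride
 * bytes, capping the height at what gen7+ hardware tracks. */
static int fbc_cfb_size(int lines, int stride_bytes, int gen)
{
	if (gen >= 7 && lines > 2048)
		lines = 2048;
	return lines * stride_bytes;
}

int main(void)
{
	/* a 2160-line framebuffer with a 16384-byte stride on gen9 */
	printf("CFB bytes: %d\n", fbc_cfb_size(2160, 16384, 9));
	return 0;
}
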
@@ -78,11 +128,9 @@ static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 		DRM_DEBUG_KMS("FBC idle timed out\n");
 		return;
 	}
-
-	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -91,10 +139,10 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
 	int i;
 	u32 fbc_ctl;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	/* Note: fbc.threshold == 1 for i8xx */
-	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
+	cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
 	if (fb->pitches[0] < cfb_pitch)
 		cfb_pitch = fb->pitches[0];
 
@@ -127,24 +175,21 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
@@ -157,38 +202,35 @@ static void g4x_fbc_enable(struct intel_crtc *crtc)
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
+static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
 
-static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
+/* This function forces a CFB recompression through the nuke operation. */
+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -197,7 +239,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
 	int threshold = dev_priv->fbc.threshold;
 	unsigned int y_offset;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
@@ -231,33 +273,29 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
 	}
 
-	intel_fbc_nuke(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+	intel_fbc_recompress(dev_priv);
 }
 
-static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
 
-static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -265,7 +303,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
 	u32 dpfc_ctl;
 	int threshold = dev_priv->fbc.threshold;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = 0;
 	if (IS_IVYBRIDGE(dev_priv))
@@ -310,155 +348,120 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
 		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
 	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
 
-	intel_fbc_nuke(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+	intel_fbc_recompress(dev_priv);
 }
 
 /**
- * intel_fbc_enabled - Is FBC enabled?
+ * intel_fbc_is_active - Is FBC active?
  * @dev_priv: i915 device instance
 *
  * This function is used to verify the current state of FBC.
  * FIXME: This should be tracked in the plane config eventually
  *        instead of queried at runtime for most callers.
  */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.enabled;
+	return dev_priv->fbc.active;
 }
 
-static void intel_fbc_enable(struct intel_crtc *crtc,
-			     const struct drm_framebuffer *fb)
+static void intel_fbc_activate(const struct drm_framebuffer *fb)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = fb->dev->dev_private;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
 
-	dev_priv->fbc.enable_fbc(crtc);
+	dev_priv->fbc.activate(crtc);
 
-	dev_priv->fbc.crtc = crtc;
 	dev_priv->fbc.fb_id = fb->base.id;
 	dev_priv->fbc.y = crtc->base.y;
 }
 
 static void intel_fbc_work_fn(struct work_struct *__work)
 {
-	struct intel_fbc_work *work =
-		container_of(to_delayed_work(__work),
-			     struct intel_fbc_work, work);
-	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
-	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
+	struct drm_i915_private *dev_priv =
+		container_of(__work, struct drm_i915_private, fbc.work.work);
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+	int delay_ms = 50;
+
+retry:
+	/* Delay the actual enabling to let pageflipping cease and the
+	 * display to settle before starting the compression. Note that
+	 * this delay also serves a second purpose: it allows for a
+	 * vblank to pass after disabling the FBC before we attempt
+	 * to modify the control registers.
+	 *
+	 * A more complicated solution would involve tracking vblanks
+	 * following the termination of the page-flipping sequence
+	 * and indeed performing the enable as a co-routine and not
+	 * waiting synchronously upon the vblank.
+	 *
+	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+	 */
+	wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
 
 	mutex_lock(&dev_priv->fbc.lock);
-	if (work == dev_priv->fbc.fbc_work) {
-		/* Double check that we haven't switched fb without cancelling
-		 * the prior work.
-		 */
-		if (crtc_fb == work->fb)
-			intel_fbc_enable(work->crtc, work->fb);
 
-		dev_priv->fbc.fbc_work = NULL;
+	/* Were we cancelled? */
+	if (!work->scheduled)
+		goto out;
+
+	/* Were we delayed again while this function was sleeping? */
+	if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
+		       jiffies)) {
+		mutex_unlock(&dev_priv->fbc.lock);
+		goto retry;
 	}
-	mutex_unlock(&dev_priv->fbc.lock);
 
-	kfree(work);
+	if (crtc->base.primary->fb == work->fb)
+		intel_fbc_activate(work->fb);
+
+	work->scheduled = false;
+
+out:
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	if (dev_priv->fbc.fbc_work == NULL)
-		return;
-
-	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
-	/* Synchronisation is provided by struct_mutex and checking of
-	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
-	 * entirely asynchronously.
-	 */
-	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
-		/* tasklet was killed before being run, clean up */
-		kfree(dev_priv->fbc.fbc_work);
-
-	/* Mark the work as no longer wanted so that if it does
-	 * wake-up (because the work was already running and waiting
-	 * for our mutex), it will discover that is no longer
-	 * necessary to run.
-	 */
-	dev_priv->fbc.fbc_work = NULL;
+	dev_priv->fbc.work.scheduled = false;
 }
 
-static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
+static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
 {
-	struct intel_fbc_work *work;
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
-	intel_fbc_cancel_work(dev_priv);
-
-	work = kzalloc(sizeof(*work), GFP_KERNEL);
-	if (work == NULL) {
-		DRM_ERROR("Failed to allocate FBC work structure\n");
-		intel_fbc_enable(crtc, crtc->base.primary->fb);
-		return;
-	}
-
-	work->crtc = crtc;
+	/* It is useless to call intel_fbc_cancel_work() in this function since
+	 * we're not releasing fbc.lock, so it won't have an opportunity to grab
+	 * it to discover that it was cancelled. So we just update the expected
+	 * jiffy count. */
 	work->fb = crtc->base.primary->fb;
-	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
-	dev_priv->fbc.fbc_work = work;
-
-	/* Delay the actual enabling to let pageflipping cease and the
-	 * display to settle before starting the compression. Note that
-	 * this delay also serves a second purpose: it allows for a
-	 * vblank to pass after disabling the FBC before we attempt
-	 * to modify the control registers.
-	 *
-	 * A more complicated solution would involve tracking vblanks
-	 * following the termination of the page-flipping sequence
-	 * and indeed performing the enable as a co-routine and not
-	 * waiting synchronously upon the vblank.
-	 *
-	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
-	 */
-	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+	work->scheduled = true;
+	work->enable_jiffies = jiffies;
+
+	schedule_work(&work->work);
 }
 
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
-	dev_priv->fbc.disable_fbc(dev_priv);
-	dev_priv->fbc.crtc = NULL;
-}
-
-/**
- * intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
-	if (!fbc_supported(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_disable(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.active)
+		dev_priv->fbc.deactivate(dev_priv);
 }
 
 /*
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
  * @crtc: the CRTC
 *
- * This function disables FBC if it's associated with the provided CRTC.
+ * This function deactivates FBC if it's associated with the provided CRTC.
 */
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_deactivate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
@@ -467,85 +470,42 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
 
 	mutex_lock(&dev_priv->fbc.lock);
 	if (dev_priv->fbc.crtc == crtc)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_deactivate(dev_priv);
 	mutex_unlock(&dev_priv->fbc.lock);
 }
 
-const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
-{
-	switch (reason) {
-	case FBC_OK:
-		return "FBC enabled but currently disabled in hardware";
-	case FBC_UNSUPPORTED:
-		return "unsupported by this chipset";
-	case FBC_NO_OUTPUT:
-		return "no output";
-	case FBC_STOLEN_TOO_SMALL:
-		return "not enough stolen memory";
-	case FBC_UNSUPPORTED_MODE:
-		return "mode incompatible with compression";
-	case FBC_MODE_TOO_LARGE:
-		return "mode too large for compression";
-	case FBC_BAD_PLANE:
-		return "FBC unsupported on plane";
-	case FBC_NOT_TILED:
-		return "framebuffer not tiled or fenced";
-	case FBC_MULTIPLE_PIPES:
-		return "more than one pipe active";
-	case FBC_MODULE_PARAM:
-		return "disabled per module param";
-	case FBC_CHIP_DEFAULT:
-		return "disabled per chip default";
-	case FBC_ROTATION:
-		return "rotation unsupported";
-	case FBC_IN_DBG_MASTER:
-		return "Kernel debugger is active";
-	case FBC_BAD_STRIDE:
-		return "framebuffer stride not supported";
-	case FBC_PIXEL_RATE:
-		return "pixel rate is too big";
-	case FBC_PIXEL_FORMAT:
-		return "pixel format is invalid";
-	default:
-		MISSING_CASE(reason);
-		return "unknown reason";
-	}
-}
-
 static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
-			      enum no_fbc_reason reason)
+			      const char *reason)
 {
 	if (dev_priv->fbc.no_fbc_reason == reason)
 		return;
 
 	dev_priv->fbc.no_fbc_reason = reason;
-	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
+	DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
 }
 
-static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+static bool crtc_can_fbc(struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = NULL, *tmp_crtc;
-	enum pipe pipe;
-	bool pipe_a_only = false;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
-		pipe_a_only = true;
+	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+		return false;
 
-	for_each_pipe(dev_priv, pipe) {
-		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+		return false;
 
-		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
-			crtc = tmp_crtc;
+	return true;
+}
 
-		if (pipe_a_only)
-			break;
-	}
+static bool crtc_is_valid(struct intel_crtc *crtc)
+{
+	if (!intel_crtc_active(&crtc->base))
+		return false;
 
-	if (!crtc || crtc->primary->fb == NULL)
-		return NULL;
+	if (!to_intel_plane_state(crtc->base.primary->state)->visible)
+		return false;
 
-	return crtc;
+	return true;
 }
 
 static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
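
Editor's note: the reworked intel_fbc_work_fn() replaces a kzalloc'd delayed-work item per request with one statically embedded work struct: scheduling just re-arms enable_jiffies, and the worker re-checks the deadline after sleeping and loops if it moved while it slept. A hypothetical standalone model of that retry logic (time() at one-second granularity stands in for jiffies):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fbc_work {
	bool scheduled;
	time_t deadline; /* stands in for enable_jiffies + delay */
};

static void fbc_work_fn(struct fbc_work *work)
{
	for (;;) {
		time_t deadline = work->deadline;

		/* stand-in for wait_remaining_ms_from_jiffies() */
		while (time(NULL) < deadline)
			;

		if (!work->scheduled)
			return; /* cancelled while we slept */

		if (work->deadline == deadline)
			break; /* deadline unchanged: go activate */

		/* re-armed while we slept: sleep again */
	}

	puts("activating FBC");
	work->scheduled = false;
}

int main(void)
{
	struct fbc_work work = {
		.scheduled = true,
		.deadline = time(NULL) + 1,
	};

	fbc_work_fn(&work);
	return 0;
}
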
@@ -581,7 +541,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	 * reserved range size, so it always assumes the maximum (8mb) is used.
 	 * If we enable FBC using a CFB on that memory range we'll get FIFO
 	 * underruns, even if that range is not reserved by the BIOS. */
-	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+	if (IS_BROADWELL(dev_priv) ||
+	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
 	else
 		end = dev_priv->gtt.stolen_usable_size;
@@ -617,11 +578,17 @@ again:
 	}
 }
 
-static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
-			       int fb_cpp)
+static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->state->fb;
 	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int ret;
+	int size, fb_cpp, ret;
+
+	WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+
+	size = intel_fbc_calculate_cfb_size(crtc, fb);
+	fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
 	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
 					 size, fb_cpp);
@@ -656,8 +623,6 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
 			      dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->fbc.uncompressed_size = size;
-
 	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
 		      dev_priv->fbc.compressed_fb.size,
 		      dev_priv->fbc.threshold);
@@ -674,18 +639,15 @@ err_llb:
 
 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
-	if (dev_priv->fbc.uncompressed_size == 0)
-		return;
-
-	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+	if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
+		i915_gem_stolen_remove_node(dev_priv,
+					    &dev_priv->fbc.compressed_fb);
 
 	if (dev_priv->fbc.compressed_llb) {
 		i915_gem_stolen_remove_node(dev_priv,
 					    dev_priv->fbc.compressed_llb);
 		kfree(dev_priv->fbc.compressed_llb);
 	}
-
-	dev_priv->fbc.uncompressed_size = 0;
 }
 
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
@@ -698,63 +660,6 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->fbc.lock);
 }
 
-/*
- * For SKL+, the plane source size used by the hardware is based on the value we
- * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
- * we wrote to PIPESRC.
- */
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
-					    int *width, int *height)
-{
-	struct intel_plane_state *plane_state =
-			to_intel_plane_state(crtc->base.primary->state);
-	int w, h;
-
-	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
-		w = drm_rect_height(&plane_state->src) >> 16;
-		h = drm_rect_width(&plane_state->src) >> 16;
-	} else {
-		w = drm_rect_width(&plane_state->src) >> 16;
-		h = drm_rect_height(&plane_state->src) >> 16;
-	}
-
-	if (width)
-		*width = w;
-	if (height)
-		*height = h;
-}
-
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	int lines;
-
-	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
-	if (INTEL_INFO(dev_priv)->gen >= 7)
-		lines = min(lines, 2048);
-
-	return lines * fb->pitches[0];
-}
-
-static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	int size, cpp;
-
-	size = intel_fbc_calculate_cfb_size(crtc);
-	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
-	if (size <= dev_priv->fbc.uncompressed_size)
-		return 0;
-
-	/* Release any current block */
-	__intel_fbc_cleanup_cfb(dev_priv);
-
-	return intel_fbc_alloc_cfb(dev_priv, size, cpp);
-}
-
 static bool stride_is_valid(struct drm_i915_private *dev_priv,
 			    unsigned int stride)
 {
@@ -829,87 +734,46 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 }
 
 /**
- * __intel_fbc_update - enable/disable FBC as needed, unlocked
- * @dev_priv: i915 device instance
- *
- * Set up the framebuffer compression hardware at mode set time.  We
- * enable it if possible:
- *   - plane A only (on pre-965)
- *   - no pixel mulitply/line duplication
- *   - no alpha buffer discard
- *   - no dual wide
- *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one.  It also must reside (along with the line length buffer) in
- * stolen memory.
+ * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
+ * @crtc: the CRTC that triggered the update
  *
- * We need to enable/disable FBC on a global basis.
+ * This function completely reevaluates the status of FBC, then activates,
+ * deactivates or maintains it on the same state.
 */
-static void __intel_fbc_update(struct drm_i915_private *dev_priv)
+static void __intel_fbc_update(struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = NULL;
-	struct intel_crtc *intel_crtc;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb;
 	struct drm_i915_gem_object *obj;
 	const struct drm_display_mode *adjusted_mode;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
-	/* disable framebuffer compression in vGPU */
-	if (intel_vgpu_active(dev_priv->dev))
-		i915.enable_fbc = 0;
-
-	if (i915.enable_fbc < 0) {
-		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
+	if (!multiple_pipes_ok(dev_priv)) {
+		set_no_fbc_reason(dev_priv, "more than one pipe active");
 		goto out_disable;
 	}
 
-	if (!i915.enable_fbc) {
-		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
-		goto out_disable;
-	}
+	if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
+		return;
 
-	/*
-	 * If FBC is already on, we just have to verify that we can
-	 * keep it that way...
-	 * Need to disable if:
-	 *   - more than one pipe is active
-	 *   - changing FBC params (stride, fence, mode)
-	 *   - new fb is too large to fit in compressed buffer
-	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
-	 */
-	crtc = intel_fbc_find_crtc(dev_priv);
-	if (!crtc) {
-		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
+	if (!crtc_is_valid(crtc)) {
+		set_no_fbc_reason(dev_priv, "no output");
 		goto out_disable;
 	}
 
-	if (!multiple_pipes_ok(dev_priv)) {
-		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
-		goto out_disable;
-	}
-
-	intel_crtc = to_intel_crtc(crtc);
-	fb = crtc->primary->fb;
+	fb = crtc->base.primary->fb;
 	obj = intel_fb_obj(fb);
-	adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	adjusted_mode = &crtc->config->base.adjusted_mode;
 
 	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
+		set_no_fbc_reason(dev_priv, "incompatible mode");
 		goto out_disable;
 	}
 
-	if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
-		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
-		goto out_disable;
-	}
-
-	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
-	    intel_crtc->plane != PLANE_A) {
-		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
+	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
+		set_no_fbc_reason(dev_priv, "mode too large for compression");
 		goto out_disable;
 	}
 
@@ -918,41 +782,46 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 	 */
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
+		set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
 		goto out_disable;
 	}
 	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
-	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
-		set_no_fbc_reason(dev_priv, FBC_ROTATION);
+	    crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
+		set_no_fbc_reason(dev_priv, "rotation unsupported");
 		goto out_disable;
 	}
 
 	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
-		set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
+		set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
 		goto out_disable;
 	}
 
 	if (!pixel_format_is_valid(fb)) {
-		set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
-		goto out_disable;
-	}
-
-	/* If the kernel debugger is active, always disable compression */
-	if (in_dbg_master()) {
-		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
+		set_no_fbc_reason(dev_priv, "pixel format is invalid");
 		goto out_disable;
 	}
 
 	/* WaFbcExceedCdClockThreshold:hsw,bdw */
 	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
-	    ilk_pipe_pixel_rate(intel_crtc->config) >=
+	    ilk_pipe_pixel_rate(crtc->config) >=
 	    dev_priv->cdclk_freq * 95 / 100) {
-		set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
+		set_no_fbc_reason(dev_priv, "pixel rate is too big");
 		goto out_disable;
 	}
 
-	if (intel_fbc_setup_cfb(intel_crtc)) {
-		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
+	/* It is possible for the required CFB size change without a
+	 * crtc->disable + crtc->enable since it is possible to change the
+	 * stride without triggering a full modeset. Since we try to
+	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
+	 * if this happens, but if we exceed the current CFB size we'll have to
+	 * disable FBC. Notice that it would be possible to disable FBC, wait
+	 * for a frame, free the stolen node, then try to reenable FBC in case
+	 * we didn't get any invalidate/deactivate calls, but this would require
+	 * a lot of tracking just for a specific case. If we conclude it's an
+	 * important case, we can implement it later. */
+	if (intel_fbc_calculate_cfb_size(crtc, fb) >
+	    dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
+		set_no_fbc_reason(dev_priv, "CFB requirements changed");
 		goto out_disable;
 	}
 
@@ -961,12 +830,13 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 	 * cannot be unpinned (and have its GTT offset and fence revoked)
 	 * without first being decoupled from the scanout and FBC disabled.
 	 */
-	if (dev_priv->fbc.crtc == intel_crtc &&
+	if (dev_priv->fbc.crtc == crtc &&
 	    dev_priv->fbc.fb_id == fb->base.id &&
-	    dev_priv->fbc.y == crtc->y)
+	    dev_priv->fbc.y == crtc->base.y &&
+	    dev_priv->fbc.active)
 		return;
 
-	if (intel_fbc_enabled(dev_priv)) {
+	if (intel_fbc_is_active(dev_priv)) {
 		/* We update FBC along two paths, after changing fb/crtc
 		 * configuration (modeswitching) and after page-flipping
 		 * finishes. For the latter, we know that not only did
@@ -990,36 +860,37 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 		 * disabling paths we do need to wait for a vblank at
 		 * some point. And we wait before enabling FBC anyway.
 		 */
-		DRM_DEBUG_KMS("disabling active FBC for update\n");
-		__intel_fbc_disable(dev_priv);
+		DRM_DEBUG_KMS("deactivating FBC for update\n");
+		__intel_fbc_deactivate(dev_priv);
 	}
 
-	intel_fbc_schedule_enable(intel_crtc);
-	dev_priv->fbc.no_fbc_reason = FBC_OK;
+	intel_fbc_schedule_activation(crtc);
+	dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
 	return;
 
 out_disable:
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev_priv)) {
-		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-		__intel_fbc_disable(dev_priv);
+	if (intel_fbc_is_active(dev_priv)) {
+		DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
+		__intel_fbc_deactivate(dev_priv);
 	}
-	__intel_fbc_cleanup_cfb(dev_priv);
 }
 
 /*
- * intel_fbc_update - enable/disable FBC as needed
- * @dev_priv: i915 device instance
+ * intel_fbc_update - activate/deactivate FBC as needed
+ * @crtc: the CRTC that triggered the update
 *
- * This function reevaluates the overall state and enables or disables FBC.
+ * This function reevaluates the overall state and activates or deactivates FBC.
 */
-void intel_fbc_update(struct drm_i915_private *dev_priv)
+void intel_fbc_update(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
 	if (!fbc_supported(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_update(dev_priv);
+	__intel_fbc_update(crtc);
 	mutex_unlock(&dev_priv->fbc.lock);
 }
 
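
Editor's note: the hunk above keeps the file's double-underscore convention: __intel_fbc_update() assumes fbc.lock is held, while the public intel_fbc_update() is a thin take-lock/call/unlock wrapper, so internal paths that already own the lock can call the __ variant directly. A hypothetical pthreads model of the same layering:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fbc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Locked variant: caller must hold fbc_lock. */
static void __fbc_update(int pipe)
{
	printf("reevaluating FBC on pipe %d\n", pipe);
}

/* Public entry point: the only place the lock is taken. */
static void fbc_update(int pipe)
{
	pthread_mutex_lock(&fbc_lock);
	__fbc_update(pipe);
	pthread_mutex_unlock(&fbc_lock);
}

int main(void)
{
	fbc_update(0);
	return 0;
}
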
@@ -1039,16 +910,13 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 
 	if (dev_priv->fbc.enabled)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
-	else if (dev_priv->fbc.fbc_work)
-		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-					dev_priv->fbc.fbc_work->crtc->pipe);
 	else
 		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 	if (dev_priv->fbc.busy_bits)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_deactivate(dev_priv);
 
 	mutex_unlock(&dev_priv->fbc.lock);
 }
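
Editor's note: intel_fbc_invalidate() and the intel_fbc_flush() hunk that follows track CPU frontbuffer activity as a bitmask: an invalidate marks the relevant bits busy and deactivates compression, and the matching flush clears them and lets FBC come back. A hypothetical standalone model of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

struct fbc {
	uint32_t busy_bits; /* frontbuffer bits with writes in flight */
	int active;
};

static void fbc_invalidate(struct fbc *fbc, uint32_t fbc_bits,
			   uint32_t frontbuffer_bits)
{
	fbc->busy_bits |= fbc_bits & frontbuffer_bits;
	if (fbc->busy_bits)
		fbc->active = 0; /* deactivate until flushed */
}

static void fbc_flush(struct fbc *fbc, uint32_t frontbuffer_bits)
{
	fbc->busy_bits &= ~frontbuffer_bits;
	if (!fbc->busy_bits)
		fbc->active = 1; /* the driver re-runs the update here */
}

int main(void)
{
	struct fbc fbc = { .busy_bits = 0, .active = 1 };

	fbc_invalidate(&fbc, 0x1, 0x1);
	printf("after invalidate: active=%d\n", fbc.active);
	fbc_flush(&fbc, 0x1);
	printf("after flush: active=%d\n", fbc.active);
	return 0;
}
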
@@ -1066,11 +934,136 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
1066 934
1067 dev_priv->fbc.busy_bits &= ~frontbuffer_bits; 935 dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
1068 936
1069 if (!dev_priv->fbc.busy_bits) { 937 if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
938 if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
939 intel_fbc_recompress(dev_priv);
940 } else {
941 __intel_fbc_deactivate(dev_priv);
942 __intel_fbc_update(dev_priv->fbc.crtc);
943 }
944 }
945
946 mutex_unlock(&dev_priv->fbc.lock);
947}
948
949/**
950 * intel_fbc_enable: tries to enable FBC on the CRTC
951 * @crtc: the CRTC
952 *
953 * This function checks if it's possible to enable FBC on the following CRTC,
954 * then enables it. Notice that it doesn't activate FBC.
955 */
956void intel_fbc_enable(struct intel_crtc *crtc)
957{
958 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
959
960 if (!fbc_supported(dev_priv))
961 return;
962
963 mutex_lock(&dev_priv->fbc.lock);
964
965 if (dev_priv->fbc.enabled) {
966 WARN_ON(dev_priv->fbc.crtc == crtc);
967 goto out;
968 }
969
970 WARN_ON(dev_priv->fbc.active);
971 WARN_ON(dev_priv->fbc.crtc != NULL);
972
973 if (intel_vgpu_active(dev_priv->dev)) {
974 set_no_fbc_reason(dev_priv, "VGPU is active");
975 goto out;
976 }
977
978 if (i915.enable_fbc < 0) {
979 set_no_fbc_reason(dev_priv, "disabled per chip default");
980 goto out;
981 }
982
983 if (!i915.enable_fbc) {
984 set_no_fbc_reason(dev_priv, "disabled per module param");
985 goto out;
986 }
987
988 if (!crtc_can_fbc(crtc)) {
989 set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
990 goto out;
991 }
992
993 if (intel_fbc_alloc_cfb(crtc)) {
994 set_no_fbc_reason(dev_priv, "not enough stolen memory");
995 goto out;
996 }
997
998 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 999	dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet";
1000
1001 dev_priv->fbc.enabled = true;
1002 dev_priv->fbc.crtc = crtc;
1003out:
1004 mutex_unlock(&dev_priv->fbc.lock);
1005}
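
The enable/activate split introduced above is the heart of this refactor: enable/disable track which CRTC owns the compressed framebuffer across a full modeset, while activate/deactivate toggle the compressing hardware within an enabled state. A minimal standalone sketch of that two-level state machine follows; every name in it is illustrative, none of these helpers exist in the driver.

#include <stdbool.h>
#include <stdio.h>

struct fbc_state {
	bool enabled;	/* CFB allocated, a CRTC owns FBC */
	bool active;	/* hardware is currently compressing */
};

static void fbc_enable(struct fbc_state *f)     { f->enabled = true; }
static void fbc_activate(struct fbc_state *f)   { if (f->enabled) f->active = true; }
static void fbc_deactivate(struct fbc_state *f) { f->active = false; }
static void fbc_disable(struct fbc_state *f)    { f->active = false; f->enabled = false; }

int main(void)
{
	struct fbc_state fbc = { false, false };

	fbc_enable(&fbc);	/* modeset: pick the CRTC, allocate the CFB */
	fbc_activate(&fbc);	/* update: conditions hold, start compressing */
	fbc_deactivate(&fbc);	/* frontbuffer write: stop, but keep the CFB */
	fbc_activate(&fbc);	/* flush: conditions still hold, restart */
	fbc_disable(&fbc);	/* modeset off: release the CFB */

	printf("enabled=%d active=%d\n", fbc.enabled, fbc.active);
	return 0;
}
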
1006
1007/**
1008 * __intel_fbc_disable - disable FBC
1009 * @dev_priv: i915 device instance
1010 *
1011 * This is the low level function that actually disables FBC. Callers should
1012 * grab the FBC lock.
1013 */
1014static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
1015{
1016 struct intel_crtc *crtc = dev_priv->fbc.crtc;
1017
1018 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
1019 WARN_ON(!dev_priv->fbc.enabled);
1020 WARN_ON(dev_priv->fbc.active);
1021 assert_pipe_disabled(dev_priv, crtc->pipe);
1022
1023 DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1024
1025 __intel_fbc_cleanup_cfb(dev_priv);
1026
1027 dev_priv->fbc.enabled = false;
1028 dev_priv->fbc.crtc = NULL;
1029}
1030
1031/**
1032 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
1033 * @crtc: the CRTC
1034 *
1035 * This function disables FBC if it's associated with the provided CRTC.
1036 */
1037void intel_fbc_disable_crtc(struct intel_crtc *crtc)
1038{
1039 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1040
1041 if (!fbc_supported(dev_priv))
1042 return;
1043
1044 mutex_lock(&dev_priv->fbc.lock);
1045 if (dev_priv->fbc.crtc == crtc) {
1046 WARN_ON(!dev_priv->fbc.enabled);
1047 WARN_ON(dev_priv->fbc.active);
1070 __intel_fbc_disable(dev_priv); 1048 __intel_fbc_disable(dev_priv);
1071 __intel_fbc_update(dev_priv);
1072 } 1049 }
1050 mutex_unlock(&dev_priv->fbc.lock);
1051}
1073 1052
1053/**
1054 * intel_fbc_disable - globally disable FBC
1055 * @dev_priv: i915 device instance
1056 *
1057 * This function disables FBC regardless of which CRTC is associated with it.
1058 */
1059void intel_fbc_disable(struct drm_i915_private *dev_priv)
1060{
1061 if (!fbc_supported(dev_priv))
1062 return;
1063
1064 mutex_lock(&dev_priv->fbc.lock);
1065 if (dev_priv->fbc.enabled)
1066 __intel_fbc_disable(dev_priv);
1074 mutex_unlock(&dev_priv->fbc.lock); 1067 mutex_unlock(&dev_priv->fbc.lock);
1075} 1068}
1076 1069
@@ -1084,11 +1077,14 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1084{ 1077{
1085 enum pipe pipe; 1078 enum pipe pipe;
1086 1079
1080 INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
1087 mutex_init(&dev_priv->fbc.lock); 1081 mutex_init(&dev_priv->fbc.lock);
1082 dev_priv->fbc.enabled = false;
1083 dev_priv->fbc.active = false;
1084 dev_priv->fbc.work.scheduled = false;
1088 1085
1089 if (!HAS_FBC(dev_priv)) { 1086 if (!HAS_FBC(dev_priv)) {
1090 dev_priv->fbc.enabled = false; 1087 dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
1091 dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
1092 return; 1088 return;
1093 } 1089 }
1094 1090
@@ -1096,30 +1092,34 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1096 dev_priv->fbc.possible_framebuffer_bits |= 1092 dev_priv->fbc.possible_framebuffer_bits |=
1097 INTEL_FRONTBUFFER_PRIMARY(pipe); 1093 INTEL_FRONTBUFFER_PRIMARY(pipe);
1098 1094
1099 if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) 1095 if (fbc_on_pipe_a_only(dev_priv))
1100 break; 1096 break;
1101 } 1097 }
1102 1098
1103 if (INTEL_INFO(dev_priv)->gen >= 7) { 1099 if (INTEL_INFO(dev_priv)->gen >= 7) {
1104 dev_priv->fbc.fbc_enabled = ilk_fbc_enabled; 1100 dev_priv->fbc.is_active = ilk_fbc_is_active;
1105 dev_priv->fbc.enable_fbc = gen7_fbc_enable; 1101 dev_priv->fbc.activate = gen7_fbc_activate;
1106 dev_priv->fbc.disable_fbc = ilk_fbc_disable; 1102 dev_priv->fbc.deactivate = ilk_fbc_deactivate;
1107 } else if (INTEL_INFO(dev_priv)->gen >= 5) { 1103 } else if (INTEL_INFO(dev_priv)->gen >= 5) {
1108 dev_priv->fbc.fbc_enabled = ilk_fbc_enabled; 1104 dev_priv->fbc.is_active = ilk_fbc_is_active;
1109 dev_priv->fbc.enable_fbc = ilk_fbc_enable; 1105 dev_priv->fbc.activate = ilk_fbc_activate;
1110 dev_priv->fbc.disable_fbc = ilk_fbc_disable; 1106 dev_priv->fbc.deactivate = ilk_fbc_deactivate;
1111 } else if (IS_GM45(dev_priv)) { 1107 } else if (IS_GM45(dev_priv)) {
1112 dev_priv->fbc.fbc_enabled = g4x_fbc_enabled; 1108 dev_priv->fbc.is_active = g4x_fbc_is_active;
1113 dev_priv->fbc.enable_fbc = g4x_fbc_enable; 1109 dev_priv->fbc.activate = g4x_fbc_activate;
1114 dev_priv->fbc.disable_fbc = g4x_fbc_disable; 1110 dev_priv->fbc.deactivate = g4x_fbc_deactivate;
1115 } else { 1111 } else {
1116 dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled; 1112 dev_priv->fbc.is_active = i8xx_fbc_is_active;
1117 dev_priv->fbc.enable_fbc = i8xx_fbc_enable; 1113 dev_priv->fbc.activate = i8xx_fbc_activate;
1118 dev_priv->fbc.disable_fbc = i8xx_fbc_disable; 1114 dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
1119 1115
1120 /* This value was pulled out of someone's hat */ 1116 /* This value was pulled out of someone's hat */
1121 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); 1117 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
1122 } 1118 }
1123 1119
1124 dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv); 1120 /* We still don't have any sort of hardware state readout for FBC, so
1121 * deactivate it in case the BIOS activated it to make sure software
1122 * matches the hardware state. */
1123 if (dev_priv->fbc.is_active(dev_priv))
1124 dev_priv->fbc.deactivate(dev_priv);
1125} 1125}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4fd5fdfef6bd..7ccde58f8c98 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
119{ 119{
120 struct intel_fbdev *ifbdev = 120 struct intel_fbdev *ifbdev =
121 container_of(helper, struct intel_fbdev, helper); 121 container_of(helper, struct intel_fbdev, helper);
122 struct drm_framebuffer *fb; 122 struct drm_framebuffer *fb = NULL;
123 struct drm_device *dev = helper->dev; 123 struct drm_device *dev = helper->dev;
124 struct drm_i915_private *dev_priv = to_i915(dev); 124 struct drm_i915_private *dev_priv = to_i915(dev);
125 struct drm_mode_fb_cmd2 mode_cmd = {}; 125 struct drm_mode_fb_cmd2 mode_cmd = {};
@@ -138,6 +138,8 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
138 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 138 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
139 sizes->surface_depth); 139 sizes->surface_depth);
140 140
141 mutex_lock(&dev->struct_mutex);
142
141 size = mode_cmd.pitches[0] * mode_cmd.height; 143 size = mode_cmd.pitches[0] * mode_cmd.height;
142 size = PAGE_ALIGN(size); 144 size = PAGE_ALIGN(size);
143 145
@@ -156,26 +158,28 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
156 158
157 fb = __intel_framebuffer_create(dev, &mode_cmd, obj); 159 fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
158 if (IS_ERR(fb)) { 160 if (IS_ERR(fb)) {
161 drm_gem_object_unreference(&obj->base);
159 ret = PTR_ERR(fb); 162 ret = PTR_ERR(fb);
160 goto out_unref; 163 goto out;
161 } 164 }
162 165
163 /* Flush everything out, we'll be doing GTT only from now on */ 166 /* Flush everything out, we'll be doing GTT only from now on */
164 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL); 167 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
165 if (ret) { 168 if (ret) {
166 DRM_ERROR("failed to pin obj: %d\n", ret); 169 DRM_ERROR("failed to pin obj: %d\n", ret);
167 goto out_fb; 170 goto out;
168 } 171 }
169 172
173 mutex_unlock(&dev->struct_mutex);
174
170 ifbdev->fb = to_intel_framebuffer(fb); 175 ifbdev->fb = to_intel_framebuffer(fb);
171 176
172 return 0; 177 return 0;
173 178
174out_fb:
175 drm_framebuffer_remove(fb);
176out_unref:
177 drm_gem_object_unreference(&obj->base);
178out: 179out:
180 mutex_unlock(&dev->struct_mutex);
181 if (!IS_ERR_OR_NULL(fb))
182 drm_framebuffer_unreference(fb);
179 return ret; 183 return ret;
180} 184}
181 185
@@ -193,8 +197,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
193 int size, ret; 197 int size, ret;
194 bool prealloc = false; 198 bool prealloc = false;
195 199
196 mutex_lock(&dev->struct_mutex);
197
198 if (intel_fb && 200 if (intel_fb &&
199 (sizes->fb_width > intel_fb->base.width || 201 (sizes->fb_width > intel_fb->base.width ||
200 sizes->fb_height > intel_fb->base.height)) { 202 sizes->fb_height > intel_fb->base.height)) {
@@ -209,7 +211,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
209 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 211 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
210 ret = intelfb_alloc(helper, sizes); 212 ret = intelfb_alloc(helper, sizes);
211 if (ret) 213 if (ret)
212 goto out_unlock; 214 return ret;
213 intel_fb = ifbdev->fb; 215 intel_fb = ifbdev->fb;
214 } else { 216 } else {
215 DRM_DEBUG_KMS("re-using BIOS fb\n"); 217 DRM_DEBUG_KMS("re-using BIOS fb\n");
@@ -221,8 +223,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
221 obj = intel_fb->obj; 223 obj = intel_fb->obj;
222 size = obj->base.size; 224 size = obj->base.size;
223 225
226 mutex_lock(&dev->struct_mutex);
227
224 info = drm_fb_helper_alloc_fbi(helper); 228 info = drm_fb_helper_alloc_fbi(helper);
225 if (IS_ERR(info)) { 229 if (IS_ERR(info)) {
230 DRM_ERROR("Failed to allocate fb_info\n");
226 ret = PTR_ERR(info); 231 ret = PTR_ERR(info);
227 goto out_unpin; 232 goto out_unpin;
228 } 233 }
@@ -249,6 +254,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
249 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), 254 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
250 size); 255 size);
251 if (!info->screen_base) { 256 if (!info->screen_base) {
257 DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
252 ret = -ENOSPC; 258 ret = -ENOSPC;
253 goto out_destroy_fbi; 259 goto out_destroy_fbi;
254 } 260 }
@@ -281,8 +287,6 @@ out_destroy_fbi:
281 drm_fb_helper_release_fbi(helper); 287 drm_fb_helper_release_fbi(helper);
282out_unpin: 288out_unpin:
283 i915_gem_object_ggtt_unpin(obj); 289 i915_gem_object_ggtt_unpin(obj);
284 drm_gem_object_unreference(&obj->base);
285out_unlock:
286 mutex_unlock(&dev->struct_mutex); 290 mutex_unlock(&dev->struct_mutex);
287 return ret; 291 return ret;
288} 292}
@@ -526,8 +530,10 @@ static void intel_fbdev_destroy(struct drm_device *dev,
526 530
527 drm_fb_helper_fini(&ifbdev->helper); 531 drm_fb_helper_fini(&ifbdev->helper);
528 532
529 drm_framebuffer_unregister_private(&ifbdev->fb->base); 533 if (ifbdev->fb) {
530 drm_framebuffer_remove(&ifbdev->fb->base); 534 drm_framebuffer_unregister_private(&ifbdev->fb->base);
535 drm_framebuffer_remove(&ifbdev->fb->base);
536 }
531} 537}
532 538
533/* 539/*
@@ -702,13 +708,20 @@ int intel_fbdev_init(struct drm_device *dev)
702 return 0; 708 return 0;
703} 709}
704 710
705void intel_fbdev_initial_config(void *data, async_cookie_t cookie) 711static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
706{ 712{
707 struct drm_i915_private *dev_priv = data; 713 struct drm_i915_private *dev_priv = data;
708 struct intel_fbdev *ifbdev = dev_priv->fbdev; 714 struct intel_fbdev *ifbdev = dev_priv->fbdev;
709 715
 710	/* Due to peculiar init order wrt hpd handling this is separate. */ 716	/* Due to peculiar init order wrt hpd handling this is separate. */
711 drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); 717 if (drm_fb_helper_initial_config(&ifbdev->helper,
718 ifbdev->preferred_bpp))
719 intel_fbdev_fini(dev_priv->dev);
720}
721
722void intel_fbdev_initial_config_async(struct drm_device *dev)
723{
724 async_schedule(intel_fbdev_initial_config, to_i915(dev));
712} 725}
713 726
714void intel_fbdev_fini(struct drm_device *dev) 727void intel_fbdev_fini(struct drm_device *dev)
@@ -719,7 +732,8 @@ void intel_fbdev_fini(struct drm_device *dev)
719 732
720 flush_work(&dev_priv->fbdev_suspend_work); 733 flush_work(&dev_priv->fbdev_suspend_work);
721 734
722 async_synchronize_full(); 735 if (!current_is_async())
736 async_synchronize_full();
723 intel_fbdev_destroy(dev, dev_priv->fbdev); 737 intel_fbdev_destroy(dev, dev_priv->fbdev);
724 kfree(dev_priv->fbdev); 738 kfree(dev_priv->fbdev);
725 dev_priv->fbdev = NULL; 739 dev_priv->fbdev = NULL;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 54daa66c6970..bda526660e20 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -84,38 +84,21 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
84 return true; 84 return true;
85} 85}
86 86
87/** 87static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
88 * i9xx_check_fifo_underruns - check for fifo underruns
89 * @dev_priv: i915 device instance
90 *
91 * This function checks for fifo underruns on GMCH platforms. This needs to be
92 * done manually on modeset to make sure that we catch all underruns since they
93 * do not generate an interrupt by themselves on these platforms.
94 */
95void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
96{ 88{
97 struct intel_crtc *crtc; 89 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
98 90 i915_reg_t reg = PIPESTAT(crtc->pipe);
99 spin_lock_irq(&dev_priv->irq_lock); 91 u32 pipestat = I915_READ(reg) & 0xffff0000;
100
101 for_each_intel_crtc(dev_priv->dev, crtc) {
102 u32 reg = PIPESTAT(crtc->pipe);
103 u32 pipestat;
104
105 if (crtc->cpu_fifo_underrun_disabled)
106 continue;
107 92
108 pipestat = I915_READ(reg) & 0xffff0000; 93 assert_spin_locked(&dev_priv->irq_lock);
109 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
110 continue;
111 94
112 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 95 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
113 POSTING_READ(reg); 96 return;
114 97
115 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 98 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
116 } 99 POSTING_READ(reg);
117 100
118 spin_unlock_irq(&dev_priv->irq_lock); 101 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
119} 102}
120 103
121static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 104static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -123,7 +106,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
123 bool enable, bool old) 106 bool enable, bool old)
124{ 107{
125 struct drm_i915_private *dev_priv = dev->dev_private; 108 struct drm_i915_private *dev_priv = dev->dev_private;
126 u32 reg = PIPESTAT(pipe); 109 i915_reg_t reg = PIPESTAT(pipe);
127 u32 pipestat = I915_READ(reg) & 0xffff0000; 110 u32 pipestat = I915_READ(reg) & 0xffff0000;
128 111
129 assert_spin_locked(&dev_priv->irq_lock); 112 assert_spin_locked(&dev_priv->irq_lock);
@@ -145,9 +128,26 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
145 DE_PIPEB_FIFO_UNDERRUN; 128 DE_PIPEB_FIFO_UNDERRUN;
146 129
147 if (enable) 130 if (enable)
148 ironlake_enable_display_irq(dev_priv, bit); 131 ilk_enable_display_irq(dev_priv, bit);
149 else 132 else
150 ironlake_disable_display_irq(dev_priv, bit); 133 ilk_disable_display_irq(dev_priv, bit);
134}
135
136static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
137{
138 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
139 enum pipe pipe = crtc->pipe;
140 uint32_t err_int = I915_READ(GEN7_ERR_INT);
141
142 assert_spin_locked(&dev_priv->irq_lock);
143
144 if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
145 return;
146
147 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
148 POSTING_READ(GEN7_ERR_INT);
149
150 DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
151} 151}
152 152
153static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 153static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -161,9 +161,9 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
161 if (!ivb_can_enable_err_int(dev)) 161 if (!ivb_can_enable_err_int(dev))
162 return; 162 return;
163 163
164 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 164 ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
165 } else { 165 } else {
166 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 166 ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
167 167
168 if (old && 168 if (old &&
169 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { 169 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -178,14 +178,10 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
178{ 178{
179 struct drm_i915_private *dev_priv = dev->dev_private; 179 struct drm_i915_private *dev_priv = dev->dev_private;
180 180
181 assert_spin_locked(&dev_priv->irq_lock);
182
183 if (enable) 181 if (enable)
184 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 182 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
185 else 183 else
186 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 184 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
187 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
188 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
189} 185}
190 186
191static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 187static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -202,6 +198,24 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
202 ibx_disable_display_interrupt(dev_priv, bit); 198 ibx_disable_display_interrupt(dev_priv, bit);
203} 199}
204 200
201static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
202{
203 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
204 enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
205 uint32_t serr_int = I915_READ(SERR_INT);
206
207 assert_spin_locked(&dev_priv->irq_lock);
208
209 if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
210 return;
211
212 I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
213 POSTING_READ(SERR_INT);
214
215 DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
216 transcoder_name(pch_transcoder));
217}
218
205static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 219static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
206 enum transcoder pch_transcoder, 220 enum transcoder pch_transcoder,
207 bool enable, bool old) 221 bool enable, bool old)
@@ -375,3 +389,56 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
375 DRM_ERROR("PCH transcoder %c FIFO underrun\n", 389 DRM_ERROR("PCH transcoder %c FIFO underrun\n",
376 transcoder_name(pch_transcoder)); 390 transcoder_name(pch_transcoder));
377} 391}
392
393/**
394 * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
395 * @dev_priv: i915 device instance
396 *
397 * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
398 * error interrupt may have been disabled, and so CPU fifo underruns won't
399 * necessarily raise an interrupt, and on GMCH platforms where underruns never
400 * raise an interrupt.
401 */
402void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
403{
404 struct intel_crtc *crtc;
405
406 spin_lock_irq(&dev_priv->irq_lock);
407
408 for_each_intel_crtc(dev_priv->dev, crtc) {
409 if (crtc->cpu_fifo_underrun_disabled)
410 continue;
411
412 if (HAS_GMCH_DISPLAY(dev_priv))
413 i9xx_check_fifo_underruns(crtc);
414 else if (IS_GEN7(dev_priv))
415 ivybridge_check_fifo_underruns(crtc);
416 }
417
418 spin_unlock_irq(&dev_priv->irq_lock);
419}
420
421/**
422 * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
423 * @dev_priv: i915 device instance
424 *
425 * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
426 * error interrupt may have been disabled, and so PCH fifo underruns won't
427 * necessarily raise an interrupt.
428 */
429void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
430{
431 struct intel_crtc *crtc;
432
433 spin_lock_irq(&dev_priv->irq_lock);
434
435 for_each_intel_crtc(dev_priv->dev, crtc) {
436 if (crtc->pch_fifo_underrun_disabled)
437 continue;
438
439 if (HAS_PCH_CPT(dev_priv))
440 cpt_check_pch_fifo_underruns(crtc);
441 }
442
443 spin_unlock_irq(&dev_priv->irq_lock);
444}
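
As a usage sketch, a modeset path would lean on these helpers roughly as below; the surrounding call sites are an assumption drawn from the kernel-doc above, not part of this hunk.

	/* Hypothetical caller: underrun reporting is parked across the risky
	 * window, then latched underruns are checked manually before the
	 * interrupt path is re-armed, so IVB/HSW (shared error interrupt)
	 * and GMCH (no underrun interrupt at all) don't lose events. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	/* ... program and enable the pipe ... */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
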
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 081d5f648d26..822952235dcf 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -42,8 +42,6 @@ struct i915_guc_client {
42 42
43 uint32_t wq_offset; 43 uint32_t wq_offset;
44 uint32_t wq_size; 44 uint32_t wq_size;
45
46 spinlock_t wq_lock; /* Protects all data below */
47 uint32_t wq_tail; 45 uint32_t wq_tail;
48 46
49 /* GuC submission statistics & status */ 47 /* GuC submission statistics & status */
@@ -76,11 +74,17 @@ struct intel_guc_fw {
76 uint16_t guc_fw_minor_wanted; 74 uint16_t guc_fw_minor_wanted;
77 uint16_t guc_fw_major_found; 75 uint16_t guc_fw_major_found;
78 uint16_t guc_fw_minor_found; 76 uint16_t guc_fw_minor_found;
77
78 uint32_t header_size;
79 uint32_t header_offset;
80 uint32_t rsa_size;
81 uint32_t rsa_offset;
82 uint32_t ucode_size;
83 uint32_t ucode_offset;
79}; 84};
80 85
81struct intel_guc { 86struct intel_guc {
82 struct intel_guc_fw guc_fw; 87 struct intel_guc_fw guc_fw;
83
84 uint32_t log_flags; 88 uint32_t log_flags;
85 struct drm_i915_gem_object *log_obj; 89 struct drm_i915_gem_object *log_obj;
86 90
@@ -89,8 +93,6 @@ struct intel_guc {
89 93
90 struct i915_guc_client *execbuf_client; 94 struct i915_guc_client *execbuf_client;
91 95
92 spinlock_t host2guc_lock; /* Protects all data below */
93
94 DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); 96 DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
95 uint32_t db_cacheline; /* Cyclic counter mod pagesize */ 97 uint32_t db_cacheline; /* Cyclic counter mod pagesize */
96 98
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 593d2f585978..40b2ea572e16 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -122,6 +122,78 @@
122 122
123#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) 123#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
124 124
125/**
126 * DOC: GuC Firmware Layout
127 *
128 * The GuC firmware layout looks like this:
129 *
130 * +-------------------------------+
131 * | guc_css_header |
132 * | contains major/minor version |
133 * +-------------------------------+
134 * | uCode |
135 * +-------------------------------+
136 * | RSA signature |
137 * +-------------------------------+
138 * | modulus key |
139 * +-------------------------------+
140 * | exponent val |
141 * +-------------------------------+
142 *
 143 * The firmware may or may not carry the modulus key and exponent data. The
 144 * header, uCode and RSA signature are mandatory components used by the
 145 * driver. The length of each component, in dwords, can be found in the
 146 * header. When the modulus and exponent are absent (a truncated image),
 147 * their length values still appear in the header.
 148 *
 149 * The driver performs basic firmware size validation using these rules:
 150 *
 151 * 1. The header, uCode and RSA signature are mandatory components.
 152 * 2. All firmware components, if present, appear in the sequence shown in
 153 *    the layout table above.
 154 * 3. The length of each component, in dwords, can be found in the header.
 155 * 4. The modulus and exponent key are not required by the driver; when they
 156 *    are absent, the driver loads the truncated firmware image as-is.
157 */
158
159struct guc_css_header {
160 uint32_t module_type;
161 /* header_size includes all non-uCode bits, including css_header, rsa
162 * key, modulus key and exponent data. */
163 uint32_t header_size_dw;
164 uint32_t header_version;
165 uint32_t module_id;
166 uint32_t module_vendor;
167 union {
168 struct {
169 uint8_t day;
170 uint8_t month;
171 uint16_t year;
172 };
173 uint32_t date;
174 };
175 uint32_t size_dw; /* uCode plus header_size_dw */
176 uint32_t key_size_dw;
177 uint32_t modulus_size_dw;
178 uint32_t exponent_size_dw;
179 union {
180 struct {
181 uint8_t hour;
182 uint8_t min;
183 uint16_t sec;
184 };
185 uint32_t time;
186 };
187
188 char username[8];
189 char buildnumber[12];
190 uint32_t device_id;
191 uint32_t guc_sw_version;
192 uint32_t prod_preprod_fw;
193 uint32_t reserved[12];
194 uint32_t header_info;
195} __packed;
196
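To make the header arithmetic concrete, here is a self-contained sketch mirroring the size computations guc_fw_fetch() performs on this struct; the dword counts are illustrative only (a 128-byte CSS header, a 256-byte RSA key, and a truncated image with no modulus/exponent).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* header_size_dw covers the CSS header plus all key material */
	uint32_t header_size_dw = 32 + 64;	/* 128B css header + 256B rsa key */
	uint32_t key_size_dw = 64;
	uint32_t modulus_size_dw = 0, exponent_size_dw = 0;
	uint32_t size_dw = header_size_dw + 4096;	/* header + uCode, in dwords */

	uint32_t header_bytes = (header_size_dw - modulus_size_dw -
				 key_size_dw - exponent_size_dw) * 4;
	uint32_t ucode_bytes = (size_dw - header_size_dw) * 4;
	uint32_t rsa_bytes = key_size_dw * 4;

	/* header lands at offset 0, uCode right after it, RSA after uCode;
	 * only header + uCode go through the DMA engine */
	printf("header=%uB ucode=%uB rsa=%uB dma=%uB\n",
	       header_bytes, ucode_bytes, rsa_bytes,
	       header_bytes + ucode_bytes);
	return 0;
}
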
125struct guc_doorbell_info { 197struct guc_doorbell_info {
126 u32 db_status; 198 u32 db_status;
127 u32 cookie; 199 u32 cookie;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 3541f76c65a7..550921f2ef7d 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -31,7 +31,7 @@
31#include "intel_guc.h" 31#include "intel_guc.h"
32 32
33/** 33/**
34 * DOC: GuC 34 * DOC: GuC-specific firmware loader
35 * 35 *
36 * intel_guc: 36 * intel_guc:
37 * Top level structure of guc. It handles firmware loading and manages client 37 * Top level structure of guc. It handles firmware loading and manages client
@@ -208,16 +208,6 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
208/* 208/*
209 * Transfer the firmware image to RAM for execution by the microcontroller. 209 * Transfer the firmware image to RAM for execution by the microcontroller.
210 * 210 *
211 * GuC Firmware layout:
212 * +-------------------------------+ ----
213 * | CSS header | 128B
214 * | contains major/minor version |
215 * +-------------------------------+ ----
216 * | uCode |
217 * +-------------------------------+ ----
218 * | RSA signature | 256B
219 * +-------------------------------+ ----
220 *
221 * Architecturally, the DMA engine is bidirectional, and can potentially even 211 * Architecturally, the DMA engine is bidirectional, and can potentially even
222 * transfer between GTT locations. This functionality is left out of the API 212 * transfer between GTT locations. This functionality is left out of the API
223 * for now as there is no need for it. 213 * for now as there is no need for it.
@@ -225,33 +215,29 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
225 * Note that GuC needs the CSS header plus uKernel code to be copied by the 215 * Note that GuC needs the CSS header plus uKernel code to be copied by the
226 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. 216 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
227 */ 217 */
228
229#define UOS_CSS_HEADER_OFFSET 0
230#define UOS_VER_MINOR_OFFSET 0x44
231#define UOS_VER_MAJOR_OFFSET 0x46
232#define UOS_CSS_HEADER_SIZE 0x80
233#define UOS_RSA_SIG_SIZE 0x100
234
235static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) 218static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
236{ 219{
237 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 220 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
238 struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; 221 struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
239 unsigned long offset; 222 unsigned long offset;
240 struct sg_table *sg = fw_obj->pages; 223 struct sg_table *sg = fw_obj->pages;
241 u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)]; 224 u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
242 int i, ret = 0; 225 int i, ret = 0;
243 226
244 /* uCode size, also is where RSA signature starts */ 227 /* where RSA signature starts */
245 offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE; 228 offset = guc_fw->rsa_offset;
246 I915_WRITE(DMA_COPY_SIZE, ucode_size);
247 229
248 /* Copy RSA signature from the fw image to HW for verification */ 230 /* Copy RSA signature from the fw image to HW for verification */
249 sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); 231 sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
250 for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) 232 for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
251 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); 233 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
252 234
235 /* The header plus uCode will be copied to WOPCM via DMA, excluding any
236 * other components */
237 I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
238
253 /* Set the source address for the new blob */ 239 /* Set the source address for the new blob */
254 offset = i915_gem_obj_ggtt_offset(fw_obj); 240 offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
255 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); 241 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
256 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); 242 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
257 243
@@ -322,8 +308,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
322 I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); 308 I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
323 309
324 /* WaDisableMinuteIaClockGating:skl,bxt */ 310 /* WaDisableMinuteIaClockGating:skl,bxt */
325 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || 311 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
326 (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) { 312 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
327 I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) & 313 I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
328 ~GUC_ENABLE_MIA_CLOCK_GATING)); 314 ~GUC_ENABLE_MIA_CLOCK_GATING));
329 } 315 }
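
The IS_SKL_REVID()/IS_BXT_REVID() helpers used here presumably expand to an inclusive revision-range test on INTEL_REVID(); a hedged reconstruction follows, the authoritative definitions live in i915_drv.h and may differ in detail.

	#define IS_REVID(p, since, until) \
		(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
	#define IS_SKL_REVID(p, since, until) \
		(IS_SKYLAKE(p) && IS_REVID(p, since, until))
	#define IS_BXT_REVID(p, since, until) \
		(IS_BROXTON(p) && IS_REVID(p, since, until))

With these, the open-coded "revid <= SKL_REVID_B0" checks collapse into a single range expression, and the Broxton bound widens from A0 only to the A0..A1 range.
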
@@ -378,6 +364,9 @@ int intel_guc_ucode_load(struct drm_device *dev)
378 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 364 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
379 int err = 0; 365 int err = 0;
380 366
367 if (!i915.enable_guc_submission)
368 return 0;
369
381 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", 370 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
382 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), 371 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
383 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 372 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
@@ -457,10 +446,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
457{ 446{
458 struct drm_i915_gem_object *obj; 447 struct drm_i915_gem_object *obj;
459 const struct firmware *fw; 448 const struct firmware *fw;
460 const u8 *css_header; 449 struct guc_css_header *css;
461 const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE; 450 size_t size;
462 const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
463 - 0x8000; /* 32k reserved (8K stack + 24k context) */
464 int err; 451 int err;
465 452
466 DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n", 453 DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
@@ -474,12 +461,52 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
474 461
475 DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n", 462 DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
476 guc_fw->guc_fw_path, fw); 463 guc_fw->guc_fw_path, fw);
477 DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
478 fw->size, minsize, maxsize);
479 464
480 /* Check the size of the blob befoe examining buffer contents */ 465 /* Check the size of the blob before examining buffer contents */
481 if (fw->size < minsize || fw->size > maxsize) 466 if (fw->size < sizeof(struct guc_css_header)) {
467 DRM_ERROR("Firmware header is missing\n");
482 goto fail; 468 goto fail;
469 }
470
471 css = (struct guc_css_header *)fw->data;
472
473 /* Firmware bits always start from header */
474 guc_fw->header_offset = 0;
475 guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
476 css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
477
478 if (guc_fw->header_size != sizeof(struct guc_css_header)) {
479 DRM_ERROR("CSS header definition mismatch\n");
480 goto fail;
481 }
482
483 /* then, uCode */
484 guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
485 guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
486
487 /* now RSA */
488 if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
489 DRM_ERROR("RSA key size is bad\n");
490 goto fail;
491 }
492 guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
493 guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
494
 495	/* The image must contain at least the header, uCode and RSA; sum all three. */
496 size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
497 if (fw->size < size) {
498 DRM_ERROR("Missing firmware components\n");
499 goto fail;
500 }
501
 502	/* The header and uCode will be loaded into WOPCM; sum the two. */
503 size = guc_fw->header_size + guc_fw->ucode_size;
504
505 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
506 if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
507 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
508 goto fail;
509 }
483 510
484 /* 511 /*
485 * The GuC firmware image has the version number embedded at a well-known 512 * The GuC firmware image has the version number embedded at a well-known
@@ -487,9 +514,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
487 * TWO bytes each (i.e. u16), although all pointers and offsets are defined 514 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
488 * in terms of bytes (u8). 515 * in terms of bytes (u8).
489 */ 516 */
490 css_header = fw->data + UOS_CSS_HEADER_OFFSET; 517 guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
491 guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET); 518 guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
492 guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
493 519
494 if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted || 520 if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
495 guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) { 521 guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
@@ -566,6 +592,9 @@ void intel_guc_ucode_init(struct drm_device *dev)
566 fw_path = ""; /* unknown device */ 592 fw_path = ""; /* unknown device */
567 } 593 }
568 594
595 if (!i915.enable_guc_submission)
596 return;
597
569 guc_fw->guc_dev = dev; 598 guc_fw->guc_dev = dev;
570 guc_fw->guc_fw_path = fw_path; 599 guc_fw->guc_fw_path = fw_path;
571 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; 600 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9eafa191cee2..00d065fee506 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,10 +113,11 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
113 } 113 }
114} 114}
115 115
116static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv, 116static i915_reg_t
117 enum transcoder cpu_transcoder, 117hsw_dip_data_reg(struct drm_i915_private *dev_priv,
118 enum hdmi_infoframe_type type, 118 enum transcoder cpu_transcoder,
119 int i) 119 enum hdmi_infoframe_type type,
120 int i)
120{ 121{
121 switch (type) { 122 switch (type) {
122 case HDMI_INFOFRAME_TYPE_AVI: 123 case HDMI_INFOFRAME_TYPE_AVI:
@@ -127,7 +128,7 @@ static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
127 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); 128 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
128 default: 129 default:
129 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 130 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
130 return 0; 131 return INVALID_MMIO_REG;
131 } 132 }
132} 133}
133 134
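This hunk belongs to the i915_reg_t type-safety conversion that recurs through the remaining files: register offsets stop being bare u32 values and become a distinct struct type, so mixing a register with an ordinary integer no longer compiles. A compilable approximation of the pattern, reconstructed rather than copied from i915_reg.h:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	uint32_t reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0)

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	return reg.reg != 0;	/* offset 0 doubles as "no register" */
}

int main(void)
{
	i915_reg_t reg = _MMIO(0x44400);	/* arbitrary offset for the demo */
	return i915_mmio_reg_valid(reg) ? 0 : 1;
}
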
@@ -168,10 +169,10 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
168 POSTING_READ(VIDEO_DIP_CTL); 169 POSTING_READ(VIDEO_DIP_CTL);
169} 170}
170 171
171static bool g4x_infoframe_enabled(struct drm_encoder *encoder) 172static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
173 const struct intel_crtc_state *pipe_config)
172{ 174{
173 struct drm_device *dev = encoder->dev; 175 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
174 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 176 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
176 u32 val = I915_READ(VIDEO_DIP_CTL); 177 u32 val = I915_READ(VIDEO_DIP_CTL);
177 178
@@ -193,8 +194,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
193 struct drm_device *dev = encoder->dev; 194 struct drm_device *dev = encoder->dev;
194 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = dev->dev_private;
195 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
196 int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 197 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
197 u32 val = I915_READ(reg); 198 u32 val = I915_READ(reg);
199 int i;
198 200
199 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 201 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
200 202
@@ -223,13 +225,13 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
223 POSTING_READ(reg); 225 POSTING_READ(reg);
224} 226}
225 227
226static bool ibx_infoframe_enabled(struct drm_encoder *encoder) 228static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
229 const struct intel_crtc_state *pipe_config)
227{ 230{
228 struct drm_device *dev = encoder->dev; 231 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
229 struct drm_i915_private *dev_priv = dev->dev_private;
230 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
231 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 232 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
232 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 233 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
234 i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
233 u32 val = I915_READ(reg); 235 u32 val = I915_READ(reg);
234 236
235 if ((val & VIDEO_DIP_ENABLE) == 0) 237 if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -251,8 +253,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
251 struct drm_device *dev = encoder->dev; 253 struct drm_device *dev = encoder->dev;
252 struct drm_i915_private *dev_priv = dev->dev_private; 254 struct drm_i915_private *dev_priv = dev->dev_private;
253 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 255 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
254 int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 256 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
255 u32 val = I915_READ(reg); 257 u32 val = I915_READ(reg);
258 int i;
256 259
257 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 260 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
258 261
@@ -284,13 +287,12 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
284 POSTING_READ(reg); 287 POSTING_READ(reg);
285} 288}
286 289
287static bool cpt_infoframe_enabled(struct drm_encoder *encoder) 290static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
291 const struct intel_crtc_state *pipe_config)
288{ 292{
289 struct drm_device *dev = encoder->dev; 293 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
290 struct drm_i915_private *dev_priv = dev->dev_private; 294 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
291 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 295 u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
292 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
293 u32 val = I915_READ(reg);
294 296
295 if ((val & VIDEO_DIP_ENABLE) == 0) 297 if ((val & VIDEO_DIP_ENABLE) == 0)
296 return false; 298 return false;
@@ -308,8 +310,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
308 struct drm_device *dev = encoder->dev; 310 struct drm_device *dev = encoder->dev;
309 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
310 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 312 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
311 int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 313 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
312 u32 val = I915_READ(reg); 314 u32 val = I915_READ(reg);
315 int i;
313 316
314 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 317 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
315 318
@@ -338,14 +341,13 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
338 POSTING_READ(reg); 341 POSTING_READ(reg);
339} 342}
340 343
341static bool vlv_infoframe_enabled(struct drm_encoder *encoder) 344static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
345 const struct intel_crtc_state *pipe_config)
342{ 346{
343 struct drm_device *dev = encoder->dev; 347 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
344 struct drm_i915_private *dev_priv = dev->dev_private;
345 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
346 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 348 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
347 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 349 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
348 u32 val = I915_READ(reg); 350 u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
349 351
350 if ((val & VIDEO_DIP_ENABLE) == 0) 352 if ((val & VIDEO_DIP_ENABLE) == 0)
351 return false; 353 return false;
@@ -367,13 +369,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
367 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = dev->dev_private;
368 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 370 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
369 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 371 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
370 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 372 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
371 u32 data_reg; 373 i915_reg_t data_reg;
372 int i; 374 int i;
373 u32 val = I915_READ(ctl_reg); 375 u32 val = I915_READ(ctl_reg);
374 376
375 data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); 377 data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
 376	if (data_reg == 0)	 378	if (!i915_mmio_reg_valid(data_reg))
377 return; 379 return;
378 380
379 val &= ~hsw_infoframe_enable(type); 381 val &= ~hsw_infoframe_enable(type);
@@ -396,13 +398,11 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
396 POSTING_READ(ctl_reg); 398 POSTING_READ(ctl_reg);
397} 399}
398 400
399static bool hsw_infoframe_enabled(struct drm_encoder *encoder) 401static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
402 const struct intel_crtc_state *pipe_config)
400{ 403{
401 struct drm_device *dev = encoder->dev; 404 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
402 struct drm_i915_private *dev_priv = dev->dev_private; 405 u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
403 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
404 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
405 u32 val = I915_READ(ctl_reg);
406 406
407 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | 407 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
408 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | 408 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -513,7 +513,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
513 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 513 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
514 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 514 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
515 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 515 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
516 u32 reg = VIDEO_DIP_CTL; 516 i915_reg_t reg = VIDEO_DIP_CTL;
517 u32 val = I915_READ(reg); 517 u32 val = I915_READ(reg);
518 u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 518 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
519 519
@@ -633,7 +633,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
633{ 633{
634 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 634 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
635 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 635 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
636 u32 reg, val = 0; 636 i915_reg_t reg;
637 u32 val = 0;
637 638
638 if (HAS_DDI(dev_priv)) 639 if (HAS_DDI(dev_priv))
639 reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); 640 reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
@@ -666,7 +667,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
666 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 667 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
667 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 668 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
668 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 669 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
669 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 670 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
670 u32 val = I915_READ(reg); 671 u32 val = I915_READ(reg);
671 u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 672 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
672 673
@@ -717,7 +718,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
717 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 718 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
718 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 719 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
719 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 720 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
720 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 721 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
721 u32 val = I915_READ(reg); 722 u32 val = I915_READ(reg);
722 723
723 assert_hdmi_port_disabled(intel_hdmi); 724 assert_hdmi_port_disabled(intel_hdmi);
@@ -760,7 +761,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
760 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 761 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
761 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 762 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
762 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 763 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
763 u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 764 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
764 u32 val = I915_READ(reg); 765 u32 val = I915_READ(reg);
765 u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 766 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
766 767
@@ -811,7 +812,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
811 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 812 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
812 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 813 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
813 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 814 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
814 u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); 815 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
815 u32 val = I915_READ(reg); 816 u32 val = I915_READ(reg);
816 817
817 assert_hdmi_port_disabled(intel_hdmi); 818 assert_hdmi_port_disabled(intel_hdmi);
@@ -925,7 +926,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
925 if (tmp & HDMI_MODE_SELECT_HDMI) 926 if (tmp & HDMI_MODE_SELECT_HDMI)
926 pipe_config->has_hdmi_sink = true; 927 pipe_config->has_hdmi_sink = true;
927 928
928 if (intel_hdmi->infoframe_enabled(&encoder->base)) 929 if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
929 pipe_config->has_infoframe = true; 930 pipe_config->has_infoframe = true;
930 931
931 if (tmp & SDVO_AUDIO_ENABLE) 932 if (tmp & SDVO_AUDIO_ENABLE)
@@ -1108,6 +1109,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1108 * matching DP port to be enabled on transcoder A. 1109 * matching DP port to be enabled on transcoder A.
1109 */ 1110 */
1110 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) { 1111 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
1112 /*
1113 * We get CPU/PCH FIFO underruns on the other pipe when
1114 * doing the workaround. Sweep them under the rug.
1115 */
1116 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
1117 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
1118
1111 temp &= ~SDVO_PIPE_B_SELECT; 1119 temp &= ~SDVO_PIPE_B_SELECT;
1112 temp |= SDVO_ENABLE; 1120 temp |= SDVO_ENABLE;
1113 /* 1121 /*
@@ -1122,6 +1130,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1122 temp &= ~SDVO_ENABLE; 1130 temp &= ~SDVO_ENABLE;
1123 I915_WRITE(intel_hdmi->hdmi_reg, temp); 1131 I915_WRITE(intel_hdmi->hdmi_reg, temp);
1124 POSTING_READ(intel_hdmi->hdmi_reg); 1132 POSTING_READ(intel_hdmi->hdmi_reg);
1133
1134 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
1135 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1136 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1125 } 1137 }
1126 1138
1127 intel_hdmi->set_infoframes(&encoder->base, false, NULL); 1139 intel_hdmi->set_infoframes(&encoder->base, false, NULL);
@@ -1335,21 +1347,18 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1335{ 1347{
1336 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1348 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1337 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1349 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1338 struct intel_encoder *intel_encoder =
1339 &hdmi_to_dig_port(intel_hdmi)->base;
1340 enum intel_display_power_domain power_domain;
1341 struct edid *edid = NULL; 1350 struct edid *edid = NULL;
1342 bool connected = false; 1351 bool connected = false;
1343 1352
1344 power_domain = intel_display_port_power_domain(intel_encoder); 1353 if (force) {
1345 intel_display_power_get(dev_priv, power_domain); 1354 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1346 1355
1347 if (force)
1348 edid = drm_get_edid(connector, 1356 edid = drm_get_edid(connector,
1349 intel_gmbus_get_adapter(dev_priv, 1357 intel_gmbus_get_adapter(dev_priv,
1350 intel_hdmi->ddc_bus)); 1358 intel_hdmi->ddc_bus));
1351 1359
1352 intel_display_power_put(dev_priv, power_domain); 1360 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1361 }
1353 1362
1354 to_intel_connector(connector)->detect_edid = edid; 1363 to_intel_connector(connector)->detect_edid = edid;
1355 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 1364 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1383,6 +1392,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
1383 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1392 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1384 connector->base.id, connector->name); 1393 connector->base.id, connector->name);
1385 1394
1395 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1396
1386 while (!live_status && --retry) { 1397 while (!live_status && --retry) {
1387 live_status = intel_digital_port_connected(dev_priv, 1398 live_status = intel_digital_port_connected(dev_priv,
1388 hdmi_to_dig_port(intel_hdmi)); 1399 hdmi_to_dig_port(intel_hdmi));
@@ -1402,6 +1413,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
1402 } else 1413 } else
1403 status = connector_status_disconnected; 1414 status = connector_status_disconnected;
1404 1415
1416 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1417
1405 return status; 1418 return status;
1406} 1419}
1407 1420
@@ -2039,7 +2052,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2039 * On BXT A0/A1, sw needs to activate DDIA HPD logic and 2052 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
2040 * interrupts to check the external panel connection. 2053 * interrupts to check the external panel connection.
2041 */ 2054 */
2042 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) 2055 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
2043 intel_encoder->hpd_pin = HPD_PORT_A; 2056 intel_encoder->hpd_pin = HPD_PORT_A;
2044 else 2057 else
2045 intel_encoder->hpd_pin = HPD_PORT_B; 2058 intel_encoder->hpd_pin = HPD_PORT_B;
@@ -2131,7 +2144,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2131 } 2144 }
2132} 2145}
2133 2146
2134void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) 2147void intel_hdmi_init(struct drm_device *dev,
2148 i915_reg_t hdmi_reg, enum port port)
2135{ 2149{
2136 struct intel_digital_port *intel_dig_port; 2150 struct intel_digital_port *intel_dig_port;
2137 struct intel_encoder *intel_encoder; 2151 struct intel_encoder *intel_encoder;
@@ -2150,7 +2164,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
2150 intel_encoder = &intel_dig_port->base; 2164 intel_encoder = &intel_dig_port->base;
2151 2165
2152 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 2166 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
2153 DRM_MODE_ENCODER_TMDS); 2167 DRM_MODE_ENCODER_TMDS, NULL);
2154 2168
2155 intel_encoder->compute_config = intel_hdmi_compute_config; 2169 intel_encoder->compute_config = intel_hdmi_compute_config;
2156 if (HAS_PCH_SPLIT(dev)) { 2170 if (HAS_PCH_SPLIT(dev)) {
@@ -2202,7 +2216,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
2202 2216
2203 intel_dig_port->port = port; 2217 intel_dig_port->port = port;
2204 intel_dig_port->hdmi.hdmi_reg = hdmi_reg; 2218 intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
2205 intel_dig_port->dp.output_reg = 0; 2219 intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
2206 2220
2207 intel_hdmi_init_connector(intel_dig_port, intel_connector); 2221 intel_hdmi_init_connector(intel_dig_port, intel_connector);
2208} 2222}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1369fc41d039..e26e22a72e3b 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -36,7 +36,7 @@
 
 struct gmbus_pin {
 	const char *name;
-	int reg;
+	i915_reg_t reg;
 };
 
 /* Map gmbus pin pairs to names and registers. */
@@ -63,9 +63,9 @@ static const struct gmbus_pin gmbus_pins_skl[] = {
 };
 
 static const struct gmbus_pin gmbus_pins_bxt[] = {
-	[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
-	[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
-	[GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
+	[GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
+	[GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
+	[GMBUS_PIN_3_BXT] = { "misc", GPIOD },
 };
 
 /* pin is expected to be valid */
@@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
 {
 	if (IS_BROXTON(dev_priv))
 		return &gmbus_pins_bxt[pin];
-	else if (IS_SKYLAKE(dev_priv))
+	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		return &gmbus_pins_skl[pin];
 	else if (IS_BROADWELL(dev_priv))
 		return &gmbus_pins_bdw[pin];
@@ -89,14 +89,15 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
 
 	if (IS_BROXTON(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_bxt);
-	else if (IS_SKYLAKE(dev_priv))
+	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_skl);
 	else if (IS_BROADWELL(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_bdw);
 	else
 		size = ARRAY_SIZE(gmbus_pins);
 
-	return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
+	return pin < size &&
+	       i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
 }
 
 /* Intel GPIO access functions */
@@ -240,9 +241,8 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
 
 	algo = &bus->bit_algo;
 
-	bus->gpio_reg = dev_priv->gpio_mmio_base +
-		get_gmbus_pin(dev_priv, pin)->reg;
-
+	bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
+			      i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
 	bus->adapter.algo_data = algo;
 	algo->setsda = set_data;
 	algo->setscl = set_clock;
@@ -472,9 +472,7 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
 }
 
 static int
-gmbus_xfer(struct i2c_adapter *adapter,
-	   struct i2c_msg *msgs,
-	   int num)
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 {
 	struct intel_gmbus *bus = container_of(adapter,
 					       struct intel_gmbus,
@@ -483,14 +481,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
-	intel_aux_display_runtime_get(dev_priv);
-	mutex_lock(&dev_priv->gmbus_mutex);
-
-	if (bus->force_bit) {
-		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
-		goto out;
-	}
-
 retry:
 	I915_WRITE(GMBUS0, bus->reg0);
 
@@ -505,17 +495,13 @@ retry:
 			ret = gmbus_xfer_write(dev_priv, &msgs[i]);
 		}
 
+		if (!ret)
+			ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+						   GMBUS_HW_WAIT_EN);
 		if (ret == -ETIMEDOUT)
 			goto timeout;
-		if (ret == -ENXIO)
-			goto clear_err;
-
-		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-					   GMBUS_HW_WAIT_EN);
-		if (ret == -ENXIO)
+		else if (ret)
 			goto clear_err;
-		if (ret)
-			goto timeout;
 	}
 
 	/* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -589,13 +575,36 @@ timeout:
 			 bus->adapter.name, bus->reg0 & 0xff);
 	I915_WRITE(GMBUS0, 0);
 
-	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+	/*
+	 * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
+	 * instead. Use EAGAIN to have i2c core retry.
+	 */
 	bus->force_bit = 1;
-	ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+	ret = -EAGAIN;
 
 out:
+	return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+	struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	int ret;
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	mutex_lock(&dev_priv->gmbus_mutex);
+
+	if (bus->force_bit)
+		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+	else
+		ret = do_gmbus_xfer(adapter, msgs, num);
+
 	mutex_unlock(&dev_priv->gmbus_mutex);
-	intel_aux_display_runtime_put(dev_priv);
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
 	return ret;
 }
 
@@ -626,12 +635,13 @@ int intel_setup_gmbus(struct drm_device *dev)
 
 	if (HAS_PCH_NOP(dev))
 		return 0;
-	else if (HAS_PCH_SPLIT(dev))
-		dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
-	else if (IS_VALLEYVIEW(dev))
+
+	if (IS_VALLEYVIEW(dev))
 		dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
-	else
-		dev_priv->gpio_mmio_base = 0;
+	else if (!HAS_GMCH_DISPLAY(dev_priv))
+		dev_priv->gpio_mmio_base =
+			i915_mmio_reg_offset(PCH_GPIOA) -
+			i915_mmio_reg_offset(GPIOA);
 
 	mutex_init(&dev_priv->gmbus_mutex);
 	init_waitqueue_head(&dev_priv->gmbus_wait_queue);
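The gmbus_xfer() split above also changes the bit-banging fallback: instead of retrying inline, the timeout path returns -EAGAIN and relies on the i2c core to resubmit the transfer, which then lands in the force_bit branch of the new wrapper. A simplified sketch of the retry loop in the i2c core that makes this work (modeled on drivers/i2c/i2c-core.c; tracing and locking details omitted):

	int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
	{
		unsigned long orig_jiffies = jiffies;
		int ret, try;

		/* Retry automatically when the adapter reports -EAGAIN */
		for (ret = 0, try = 0; try <= adap->retries; try++) {
			ret = adap->algo->master_xfer(adap, msgs, num);
			if (ret != -EAGAIN)
				break;		/* success or a hard error */
			if (time_after(jiffies, orig_jiffies + adap->timeout))
				break;		/* give up after the adapter timeout */
		}

		return ret;
	}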
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 88e12bdf79e2..4ebafab53f30 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -190,16 +190,21 @@
 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
 #define GEN8_CTX_PRIVILEGE (1<<8)
 
-#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
+#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
+	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
+	(reg_state)[(pos)+1] = (val); \
+} while (0)
+
+#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
 	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
 	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
 	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
-}
+} while (0)
 
-#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
 	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
 	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
-}
+} while (0)
 
 enum {
 	ADVANCED_CONTEXT = 0,
@@ -284,8 +289,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-		(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+	return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+		IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
 	       (ring->id == VCS || ring->id == VCS2);
 }
 
@@ -921,7 +926,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 
 		intel_logical_ring_emit(ringbuf, MI_NOOP);
 		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_logical_ring_emit(ringbuf, INSTPM);
+		intel_logical_ring_emit_reg(ringbuf, INSTPM);
 		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
 		intel_logical_ring_advance(ringbuf);
 
@@ -1096,7 +1101,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
 		intel_logical_ring_emit(ringbuf, w->reg[i].value);
 	}
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1120,6 +1125,8 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 		batch[__index] = (cmd); \
 	} while (0)
 
+#define wa_ctx_emit_reg(batch, index, reg) \
+	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
 
 /*
  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
@@ -1149,17 +1156,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+	if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
-	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
-	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 	wa_ctx_emit(batch, index, l3sqc4_flush);
 
 	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
@@ -1172,7 +1179,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
 
 	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
-	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
 	wa_ctx_emit(batch, index, 0);
 
@@ -1314,8 +1321,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1340,18 +1347,18 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
-		wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 		wa_ctx_emit(batch, index,
 			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
 		wa_ctx_emit(batch, index, MI_NOOP);
 	}
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
 	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1472,12 +1479,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
-	if (ring->status_page.obj) {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   (u32)ring->status_page.gfx_addr);
-		POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-	}
-
 	I915_WRITE(RING_MODE_GEN7(ring),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1562,9 +1563,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
 		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
 		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
 	}
 
@@ -1923,6 +1924,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	init_waitqueue_head(&ring->irq_queue);
 
+	INIT_LIST_HEAD(&ring->buffers);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
 	spin_lock_init(&ring->execlist_lock);
@@ -1972,7 +1974,7 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->init_hw = gen8_init_render_ring;
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2024,7 +2026,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2079,7 +2081,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2109,7 +2111,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2263,46 +2265,31 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	 * only for the first context restore: on a subsequent save, the GPU will
 	 * recreate this batchbuffer with new values (including all the missing
 	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
-	if (ring->id == RCS)
-		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
-	else
-		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
-	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
-	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
-	reg_state[CTX_CONTEXT_CONTROL+1] =
-		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-				   CTX_CTRL_RS_CTX_ENABLE);
-	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
-	reg_state[CTX_RING_HEAD+1] = 0;
-	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
-	reg_state[CTX_RING_TAIL+1] = 0;
-	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+	reg_state[CTX_LRI_HEADER_0] =
+		MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+					  CTX_CTRL_RS_CTX_ENABLE));
+	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
 	/* Ring buffer start address is not known until the buffer is pinned.
 	 * It is written to the context image in execlists_update_context()
 	 */
-	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
-	reg_state[CTX_RING_BUFFER_CONTROL+1] =
-		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
-	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
-	reg_state[CTX_BB_HEAD_U+1] = 0;
-	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
-	reg_state[CTX_BB_HEAD_L+1] = 0;
-	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
-	reg_state[CTX_BB_STATE+1] = (1<<5);
-	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
-	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
-	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
-	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
-	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
-	reg_state[CTX_SECOND_BB_STATE+1] = 0;
+	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+		       RING_BB_PPGTT);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
 	if (ring->id == RCS) {
-		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
-		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
-		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
-		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
-		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
-		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
+		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
+		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
 		if (ring->wa_ctx.obj) {
 			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
 			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
@@ -2319,18 +2306,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 				0x01;
 		}
 	}
-	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
-	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
-	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
-	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
-	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
-	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
-	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
-	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
-	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
-	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
-	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
-	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+	/* PDP values will be assigned later if needed */
+	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
 
 	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
 		/* 64b PPGTT (48bit canonical)
@@ -2352,8 +2338,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 
 	if (ring->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
-		reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
-		reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
+		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+			       make_rpcs(dev));
 	}
 
 	kunmap_atomic(reg_state);
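The ASSIGN_CTX_PDP/ASSIGN_CTX_PML4 change from bare { } blocks to do { } while (0) is not cosmetic: a bare-block macro followed by a semicolon terminates an if statement early. A hypothetical caller showing the hazard the new form avoids:

	/* With the old { ... } definition this does not compile: the ';'
	 * after the macro closes the if body and leaves the else dangling.
	 * As do { ... } while (0), the macro behaves as one statement. */
	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	else
		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);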
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4e60d54ba66d..0b821b91723a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -29,16 +29,16 @@
 #define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
-#define RING_ELSP(ring)				((ring)->mmio_base+0x230)
-#define RING_EXECLIST_STATUS_LO(ring)		((ring)->mmio_base+0x234)
-#define RING_EXECLIST_STATUS_HI(ring)		((ring)->mmio_base+0x234 + 4)
-#define RING_CONTEXT_CONTROL(ring)		((ring)->mmio_base+0x244)
+#define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
+#define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
+#define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
-#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	((ring)->mmio_base+0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	((ring)->mmio_base+0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(ring)		((ring)->mmio_base+0x3a0)
+#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
 
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
@@ -70,6 +70,11 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
+static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
+					       i915_reg_t reg)
+{
+	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
+}
 
 /* Logical Ring Contexts */
 
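intel_logical_ring_emit_reg() is the typed twin of intel_logical_ring_emit(): the ring ultimately carries raw dwords, so the helper unwraps the i915_reg_t at the last moment. A typical MI_LOAD_REGISTER_IMM emission with the pair, mirroring the INSTPM write converted in intel_execlists_submission() above:

	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
	intel_logical_ring_emit_reg(ringbuf, INSTPM);	/* takes i915_reg_t */
	intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
	intel_logical_ring_advance(ringbuf);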
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 7f39b8ad88ae..0da0240caf81 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,7 @@ struct intel_lvds_encoder {
 	struct intel_encoder base;
 
 	bool is_dual_link;
-	u32 reg;
+	i915_reg_t reg;
 	u32 a3_power;
 
 	struct intel_lvds_connector *attached_connector;
@@ -210,7 +210,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	struct intel_connector *intel_connector =
 		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, stat_reg;
+	i915_reg_t ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
@@ -235,7 +235,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, stat_reg;
+	i915_reg_t ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
@@ -939,7 +939,7 @@ void intel_lvds_init(struct drm_device *dev)
 	struct drm_display_mode *downclock_mode = NULL;
 	struct edid *edid;
 	struct drm_crtc *crtc;
-	u32 lvds_reg;
+	i915_reg_t lvds_reg;
 	u32 lvds;
 	int pipe;
 	u8 pin;
@@ -1025,7 +1025,7 @@ void intel_lvds_init(struct drm_device *dev)
 			   DRM_MODE_CONNECTOR_LVDS);
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
-			 DRM_MODE_ENCODER_LVDS);
+			 DRM_MODE_ENCODER_LVDS, NULL);
 
 	intel_encoder->enable = intel_enable_lvds;
 	intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -1164,8 +1164,7 @@ out:
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
 		      lvds_encoder->is_dual_link ? "dual" : "single");
 
-	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
-				 LVDS_A3_POWER_MASK;
+	lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
 
 	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
 	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6d3c6c0a5c62..fed7bea19cc9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -143,7 +143,7 @@ static bool get_mocs_settings(struct drm_device *dev,
 {
 	bool result = false;
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		table->size = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;
@@ -159,11 +159,30 @@ static bool get_mocs_settings(struct drm_device *dev,
 	return result;
 }
 
+static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+{
+	switch (ring) {
+	case RCS:
+		return GEN9_GFX_MOCS(index);
+	case VCS:
+		return GEN9_MFX0_MOCS(index);
+	case BCS:
+		return GEN9_BLT_MOCS(index);
+	case VECS:
+		return GEN9_VEBOX_MOCS(index);
+	case VCS2:
+		return GEN9_MFX1_MOCS(index);
+	default:
+		MISSING_CASE(ring);
+		return INVALID_MMIO_REG;
+	}
+}
+
 /**
  * emit_mocs_control_table() - emit the mocs control table
  * @req:	Request to set up the MOCS table for.
  * @table:	The values to program into the control regs.
- * @reg_base:	The base for the engine that needs to be programmed.
+ * @ring:	The engine for which to emit the registers.
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
  * given table starting at the given address.
@@ -172,7 +191,7 @@ static bool get_mocs_settings(struct drm_device *dev,
 */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table,
-				   u32 reg_base)
+				   enum intel_ring_id ring)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	unsigned int index;
@@ -191,7 +210,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
 		intel_logical_ring_emit(ringbuf,
 					table->table[index].control_value);
 	}
@@ -205,7 +224,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
 		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
 	}
 
@@ -253,7 +272,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 		value = (table->table[count].l3cc_value & 0xffff) |
 			((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
-		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
 		intel_logical_ring_emit(ringbuf, value);
 	}
 
@@ -270,7 +289,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
 		intel_logical_ring_emit(ringbuf, value);
 
 		value = filler;
@@ -304,26 +323,16 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 	int ret;
 
 	if (get_mocs_settings(req->ring->dev, &t)) {
-		/* Program the control registers */
-		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
-		if (ret)
-			return ret;
+		struct drm_i915_private *dev_priv = req->i915;
+		struct intel_engine_cs *ring;
+		enum intel_ring_id ring_id;
 
-		ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
-		if (ret)
-			return ret;
+		/* Program the control registers */
+		for_each_ring(ring, dev_priv, ring_id) {
+			ret = emit_mocs_control_table(req, &t, ring_id);
+			if (ret)
+				return ret;
+		}
 
 		/* Now program the l3cc registers */
 		ret = emit_mocs_l3cc_table(req, &t);
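The per-engine loop above replaces five unrolled emit_mocs_control_table() calls, but each call still produces the same command-stream shape: one MI_LOAD_REGISTER_IMM header sized for the whole table, followed by (offset, value) pairs, with unused slots padded out using entry 0. Roughly, the dwords written for one engine look like this (illustrative layout, not lifted from this diff):

	u32 stream[] = {
		MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES),
		i915_mmio_reg_offset(mocs_register(ring, 0)), table->table[0].control_value,
		i915_mmio_reg_offset(mocs_register(ring, 1)), table->table[1].control_value,
		/* ... remaining entries; unused ones repeat entry 0 ... */
		MI_NOOP,
	};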
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6dc13c02c28e..e362a30776fa 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 	}
 
 	if (!acpi_video_bus) {
-		DRM_ERROR("No ACPI video bus found\n");
+		DRM_DEBUG_KMS("No ACPI video bus found\n");
 		return;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 444542696a2c..76f1980a7541 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -749,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
 						   &i915_ggtt_view_normal);
 	if (ret != 0)
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a24df35e11e7..ae808b68a44f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1264,6 +1264,14 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*
+ * BXT: PWM clock frequency = 19.2 MHz.
+ */
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	return KHz(19200) / pwm_freq_hz;
+}
+
+/*
  * SPT: This value represents the period of the PWM stream in clock periods
  * multiplied by 16 (default increment) or 128 (alternate increment selected in
  * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
@@ -1300,7 +1308,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 	else
 		mul = 128;
 
-	if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+	if (HAS_PCH_LPT_H(dev_priv))
 		clock = MHz(135); /* LPT:H */
 	else
 		clock = MHz(24); /* LPT:LP */
@@ -1335,22 +1343,28 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 	int clock;
 
 	if (IS_PINEVIEW(dev))
-		clock = intel_hrawclk(dev);
+		clock = MHz(intel_hrawclk(dev));
 	else
-		clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+		clock = 1000 * dev_priv->cdclk_freq;
 
 	return clock / (pwm_freq_hz * 32);
 }
 
 /*
  * Gen4: This value represents the period of the PWM stream in display core
- * clocks multiplied by 128.
+ * clocks ([DevCTG] HRAW clocks) multiplied by 128.
+ *
  */
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+	int clock;
+
+	if (IS_G4X(dev_priv))
+		clock = MHz(intel_hrawclk(dev));
+	else
+		clock = 1000 * dev_priv->cdclk_freq;
 
 	return clock / (pwm_freq_hz * 128);
 }
@@ -1385,14 +1399,18 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
 	u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
 	u32 pwm;
 
-	if (!pwm_freq_hz) {
-		DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
+	if (!panel->backlight.hz_to_pwm) {
+		DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
 		return 0;
 	}
 
-	if (!panel->backlight.hz_to_pwm) {
-		DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
-		return 0;
+	if (pwm_freq_hz) {
+		DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
+			      pwm_freq_hz);
+	} else {
+		pwm_freq_hz = 200;
+		DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
+			      pwm_freq_hz);
 	}
 
 	pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
@@ -1401,8 +1419,6 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
 		return 0;
 	}
 
-	DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
-
 	return pwm;
 }
 
@@ -1750,6 +1766,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		panel->backlight.disable = bxt_disable_backlight;
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
+		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
 	} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
 		panel->backlight.setup = lpt_setup_backlight;
 		panel->backlight.enable = lpt_enable_backlight;
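The hz_to_pwm callbacks all reduce to dividing a fixed PWM clock by the requested frequency (times the hardware's increment, where it has one). Worked numbers for the new 200 Hz fallback, assuming the clocks quoted in the comments above:

	u32 pwm_freq_hz = 200;			/* default when the VBT is silent */
	u32 bxt = KHz(19200) / pwm_freq_hz;	/* 19,200,000 / 200 = 96,000 */
	u32 lpt_h = MHz(135) / (pwm_freq_hz * 16); /* 135 MHz, mul 16 -> 42,187 */
	u32 spt = MHz(24) / (pwm_freq_hz * 16);	/* 24 MHz, mul 16 -> 7,500 */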
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d52a15df6917..ee05ce8bf79a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -66,6 +66,14 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 	 */
 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+
+	/*
+	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
+	 * to stay fully on.
+	 */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+			   PWM1_GATING_DIS | PWM2_GATING_DIS);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -1708,13 +1716,6 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
 }
 
-struct skl_pipe_wm_parameters {
-	bool active;
-	uint32_t pipe_htotal;
-	uint32_t pixel_rate; /* in KHz */
-	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
-};
-
 struct ilk_wm_maximums {
 	uint16_t pri;
 	uint16_t spr;
@@ -1722,13 +1723,6 @@ struct ilk_wm_maximums {
 	uint16_t fbc;
 };
 
-/* used in computing the new watermarks state */
-struct intel_wm_config {
-	unsigned int num_pipes_active;
-	bool sprites_enabled;
-	bool sprites_scaled;
-};
-
 /*
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
@@ -1979,9 +1973,11 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 			       const struct intel_crtc *intel_crtc,
 			       int level,
 			       struct intel_crtc_state *cstate,
+			       struct intel_plane_state *pristate,
+			       struct intel_plane_state *sprstate,
+			       struct intel_plane_state *curstate,
 			       struct intel_wm_level *result)
 {
-	struct intel_plane *intel_plane;
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
 	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
 	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
@@ -1993,29 +1989,11 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 		cur_latency *= 5;
 	}
 
-	for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
-		struct intel_plane_state *pstate =
-			to_intel_plane_state(intel_plane->base.state);
-
-		switch (intel_plane->base.type) {
-		case DRM_PLANE_TYPE_PRIMARY:
-			result->pri_val = ilk_compute_pri_wm(cstate, pstate,
-							     pri_latency,
-							     level);
-			result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
-							     result->pri_val);
-			break;
-		case DRM_PLANE_TYPE_OVERLAY:
-			result->spr_val = ilk_compute_spr_wm(cstate, pstate,
-							     spr_latency);
-			break;
-		case DRM_PLANE_TYPE_CURSOR:
-			result->cur_val = ilk_compute_cur_wm(cstate, pstate,
-							     cur_latency);
-			break;
-		}
-	}
-
+	result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+					     pri_latency, level);
+	result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+	result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+	result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
 	result->enable = true;
 }
 
@@ -2274,34 +2252,19 @@ static void skl_setup_wm_latency(struct drm_device *dev)
 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
-static void ilk_compute_wm_config(struct drm_device *dev,
-				  struct intel_wm_config *config)
-{
-	struct intel_crtc *intel_crtc;
-
-	/* Compute the currently _active_ config */
-	for_each_intel_crtc(dev, intel_crtc) {
-		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
-
-		if (!wm->pipe_enabled)
-			continue;
-
-		config->sprites_enabled |= wm->sprites_enabled;
-		config->sprites_scaled |= wm->sprites_scaled;
-		config->num_pipes_active++;
-	}
-}
-
 /* Compute new watermarks for the pipe */
-static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
-				  struct intel_pipe_wm *pipe_wm)
+static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
+			       struct drm_atomic_state *state)
 {
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct drm_device *dev = crtc->dev;
+	struct intel_pipe_wm *pipe_wm;
+	struct drm_device *dev = intel_crtc->base.dev;
 	const struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = NULL;
 	struct intel_plane *intel_plane;
+	struct drm_plane_state *ps;
+	struct intel_plane_state *pristate = NULL;
 	struct intel_plane_state *sprstate = NULL;
+	struct intel_plane_state *curstate = NULL;
 	int level, max_level = ilk_wm_max_level(dev);
 	/* LP0 watermark maximums depend on this pipe alone */
 	struct intel_wm_config config = {
@@ -2309,11 +2272,24 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
 	};
 	struct ilk_wm_maximums max;
 
+	cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+	if (IS_ERR(cstate))
+		return PTR_ERR(cstate);
+
+	pipe_wm = &cstate->wm.optimal.ilk;
+
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
-			sprstate = to_intel_plane_state(intel_plane->base.state);
-			break;
-		}
+		ps = drm_atomic_get_plane_state(state,
+						&intel_plane->base);
+		if (IS_ERR(ps))
+			return PTR_ERR(ps);
+
+		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+			pristate = to_intel_plane_state(ps);
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+			sprstate = to_intel_plane_state(ps);
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+			curstate = to_intel_plane_state(ps);
 	}
 
 	config.sprites_enabled = sprstate->visible;
@@ -2322,7 +2298,7 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
 		drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
 
 	pipe_wm->pipe_enabled = cstate->base.active;
-	pipe_wm->sprites_enabled = sprstate->visible;
+	pipe_wm->sprites_enabled = config.sprites_enabled;
 	pipe_wm->sprites_scaled = config.sprites_scaled;
 
 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
@@ -2333,24 +2309,27 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
 	if (config.sprites_scaled)
 		max_level = 0;
 
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+		pipe_wm->linetime = hsw_compute_linetime_wm(dev,
+							    &intel_crtc->base);
 
 	/* LP0 watermarks always use 1/2 DDB partitioning */
 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
 	/* At least LP0 must be valid */
 	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
-		return false;
+		return -EINVAL;
 
 	ilk_compute_wm_reg_maximums(dev, 1, &max);
 
 	for (level = 1; level <= max_level; level++) {
 		struct intel_wm_level wm = {};
 
-		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
+		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+				     pristate, sprstate, curstate, &wm);
 
 		/*
 		 * Disable any watermark level that exceeds the
@@ -2363,7 +2342,7 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
 		pipe_wm->wm[level] = wm;
 	}
 
-	return true;
+	return 0;
 }
 
 /*
@@ -2378,7 +2357,9 @@ static void ilk_merge_wm_level(struct drm_device *dev,
 	ret_wm->enable = true;
 
 	for_each_intel_crtc(dev, intel_crtc) {
-		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
+		const struct intel_crtc_state *cstate =
+			to_intel_crtc_state(intel_crtc->base.state);
+		const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
 		const struct intel_wm_level *wm = &active->wm[level];
 
 		if (!active->pipe_enabled)
@@ -2449,7 +2430,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 	 * enabled sometime later.
 	 */
 	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
-	    intel_fbc_enabled(dev_priv)) {
+	    intel_fbc_is_active(dev_priv)) {
 		for (level = 2; level <= max_level; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
@@ -2526,14 +2507,15 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 
 	/* LP0 register values */
 	for_each_intel_crtc(dev, intel_crtc) {
+		const struct intel_crtc_state *cstate =
+			to_intel_crtc_state(intel_crtc->base.state);
 		enum pipe pipe = intel_crtc->pipe;
-		const struct intel_wm_level *r =
-			&intel_crtc->wm.active.wm[0];
+		const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
 
 		if (WARN_ON(!r->enable))
 			continue;
 
-		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+		results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
 
 		results->wm_pipe[pipe] =
 			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
@@ -2755,18 +2737,40 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
 #define SKL_DDB_SIZE		896	/* in blocks */
 #define BXT_DDB_SIZE		512
 
+/*
+ * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
+ * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
+ * other universal planes are in indices 1..n.  Note that this may leave unused
+ * indices between the top "sprite" plane and the cursor.
+ */
+static int
+skl_wm_plane_id(const struct intel_plane *plane)
+{
+	switch (plane->base.type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		return 0;
+	case DRM_PLANE_TYPE_CURSOR:
+		return PLANE_CURSOR;
+	case DRM_PLANE_TYPE_OVERLAY:
+		return plane->plane + 1;
+	default:
+		MISSING_CASE(plane->base.type);
+		return plane->plane;
+	}
+}
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
-				   struct drm_crtc *for_crtc,
+				   const struct intel_crtc_state *cstate,
 				   const struct intel_wm_config *config,
-				   const struct skl_pipe_wm_parameters *params,
 				   struct skl_ddb_entry *alloc /* out */)
 {
+	struct drm_crtc *for_crtc = cstate->base.crtc;
 	struct drm_crtc *crtc;
 	unsigned int pipe_size, ddb_size;
 	int nth_active_pipe;
 
-	if (!params->active) {
+	if (!cstate->base.active) {
 		alloc->start = 0;
 		alloc->end = 0;
 		return;
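With skl_wm_plane_id() the DDB and watermark arrays are indexed by a fixed slot per plane type rather than by iteration order. On a pipe with a primary plane, two sprites, and a cursor, the mapping works out as follows (illustrative; plane names are hypothetical):

	skl_wm_plane_id(primary);	/* DRM_PLANE_TYPE_PRIMARY -> 0 */
	skl_wm_plane_id(sprite0);	/* DRM_PLANE_TYPE_OVERLAY, plane == 0 -> 1 */
	skl_wm_plane_id(sprite1);	/* DRM_PLANE_TYPE_OVERLAY, plane == 1 -> 2 */
	skl_wm_plane_id(cursor);	/* DRM_PLANE_TYPE_CURSOR -> PLANE_CURSOR */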
@@ -2837,19 +2841,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 }
 
 static unsigned int
-skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
+skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
+			     const struct drm_plane_state *pstate,
+			     int y)
 {
+	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct drm_framebuffer *fb = pstate->fb;
 
 	/* for planar format */
-	if (p->y_bytes_per_pixel) {
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
-			return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
+			return intel_crtc->config->pipe_src_w *
+				intel_crtc->config->pipe_src_h *
+				drm_format_plane_cpp(fb->pixel_format, 0);
 		else    /* uv-plane data rate */
-			return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
+			return (intel_crtc->config->pipe_src_w/2) *
+				(intel_crtc->config->pipe_src_h/2) *
+				drm_format_plane_cpp(fb->pixel_format, 1);
 	}
 
 	/* for packed formats */
-	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
+	return intel_crtc->config->pipe_src_w *
+		intel_crtc->config->pipe_src_h *
+		drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
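The relative data rate above reduces to width x height x bytes-per-pixel per hardware plane. A self-contained sketch of that arithmetic (editor's illustration only; the real code pulls the dimensions from intel_crtc->config and the cpp from drm_format_plane_cpp()):

static unsigned int relative_data_rate(unsigned int src_w, unsigned int src_h,
					unsigned int cpp, int nv12_chroma)
{
	/* NV12 chroma is 2x2 subsampled: a quarter of the samples */
	if (nv12_chroma)
		return (src_w / 2) * (src_h / 2) * cpp;

	/* packed formats, and the full-resolution NV12 Y plane */
	return src_w * src_h * cpp;
}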
@@ -2858,46 +2872,55 @@ skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
  * 3 * 4096 * 8192  * 4 < 2^32
  */
 static unsigned int
-skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
-				 const struct skl_pipe_wm_parameters *params)
+skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
 {
+	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct drm_device *dev = intel_crtc->base.dev;
+	const struct intel_plane *intel_plane;
 	unsigned int total_data_rate = 0;
-	int plane;
 
-	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
-		const struct intel_plane_wm_parameters *p;
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		const struct drm_plane_state *pstate = intel_plane->base.state;
 
-		p = &params->plane[plane];
-		if (!p->enabled)
+		if (pstate->fb == NULL)
 			continue;
 
-		total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
-		if (p->y_bytes_per_pixel) {
-			total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
-		}
+		if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		/* packed/uv */
+		total_data_rate += skl_plane_relative_data_rate(cstate,
+								pstate,
+								0);
+
+		if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
+			/* y-plane */
+			total_data_rate += skl_plane_relative_data_rate(cstate,
+									pstate,
+									1);
 	}
 
 	return total_data_rate;
 }
 
 static void
-skl_allocate_pipe_ddb(struct drm_crtc *crtc,
-		      const struct intel_wm_config *config,
-		      const struct skl_pipe_wm_parameters *params,
+skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		      struct skl_ddb_allocation *ddb /* out */)
 {
+	struct drm_crtc *crtc = cstate->base.crtc;
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_wm_config *config = &dev_priv->wm.config;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane;
 	enum pipe pipe = intel_crtc->pipe;
 	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
 	uint16_t alloc_size, start, cursor_blocks;
 	uint16_t minimum[I915_MAX_PLANES];
 	uint16_t y_minimum[I915_MAX_PLANES];
 	unsigned int total_data_rate;
-	int plane;
 
-	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
+	skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0) {
 		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
@@ -2914,17 +2937,20 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
 	alloc->end -= cursor_blocks;
 
 	/* 1. Allocate the mininum required blocks for each active plane */
-	for_each_plane(dev_priv, pipe, plane) {
-		const struct intel_plane_wm_parameters *p;
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		struct drm_plane *plane = &intel_plane->base;
+		struct drm_framebuffer *fb = plane->state->fb;
+		int id = skl_wm_plane_id(intel_plane);
 
-		p = &params->plane[plane];
-		if (!p->enabled)
+		if (fb == NULL)
+			continue;
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		minimum[plane] = 8;
-		alloc_size -= minimum[plane];
-		y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
-		alloc_size -= y_minimum[plane];
+		minimum[id] = 8;
+		alloc_size -= minimum[id];
+		y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
+		alloc_size -= y_minimum[id];
 	}
 
 	/*
@@ -2933,45 +2959,50 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
 	 *
 	 * FIXME: we may not allocate every single block here.
 	 */
-	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
+	total_data_rate = skl_get_total_relative_data_rate(cstate);
 
 	start = alloc->start;
-	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
-		const struct intel_plane_wm_parameters *p;
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		struct drm_plane *plane = &intel_plane->base;
+		struct drm_plane_state *pstate = intel_plane->base.state;
 		unsigned int data_rate, y_data_rate;
 		uint16_t plane_blocks, y_plane_blocks = 0;
+		int id = skl_wm_plane_id(intel_plane);
 
-		p = &params->plane[plane];
-		if (!p->enabled)
+		if (pstate->fb == NULL)
+			continue;
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		data_rate = skl_plane_relative_data_rate(p, 0);
+		data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
 
 		/*
 		 * allocation for (packed formats) or (uv-plane part of planar format):
 		 * promote the expression to 64 bits to avoid overflowing, the
 		 * result is < available as data_rate / total_data_rate < 1
 		 */
-		plane_blocks = minimum[plane];
+		plane_blocks = minimum[id];
 		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
 					total_data_rate);
 
-		ddb->plane[pipe][plane].start = start;
-		ddb->plane[pipe][plane].end = start + plane_blocks;
+		ddb->plane[pipe][id].start = start;
+		ddb->plane[pipe][id].end = start + plane_blocks;
 
 		start += plane_blocks;
 
 		/*
 		 * allocation for y_plane part of planar format:
 		 */
-		if (p->y_bytes_per_pixel) {
-			y_data_rate = skl_plane_relative_data_rate(p, 1);
-			y_plane_blocks = y_minimum[plane];
+		if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
+			y_data_rate = skl_plane_relative_data_rate(cstate,
+								   pstate,
+								   1);
+			y_plane_blocks = y_minimum[id];
 			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
 						  total_data_rate);
 
-			ddb->y_plane[pipe][plane].start = start;
-			ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
+			ddb->y_plane[pipe][id].start = start;
+			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
 
 			start += y_plane_blocks;
 		}
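The split above is a plain pro-rata share on top of the 8-block minimum. A condensed sketch of the same computation (editor's illustration; div_u64() is the kernel's 64-bit divide, and the cast is what keeps alloc_size * data_rate from overflowing 32 bits):

static uint16_t plane_share(uint16_t alloc_size, uint16_t min_blocks,
			    unsigned int data_rate, unsigned int total_rate)
{
	/* data_rate / total_rate < 1, so the share fits back in 16 bits */
	return min_blocks + div_u64((uint64_t)alloc_size * data_rate,
				    total_rate);
}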
@@ -3041,104 +3072,27 @@ static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	enum pipe pipe = intel_crtc->pipe;
 
-	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
-		   sizeof(new_ddb->plane[pipe])))
-		return true;
-
-	if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
-		   sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
+	/*
+	 * If ddb allocation of pipes changed, it may require recalculation of
+	 * watermarks
+	 */
+	if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
 		return true;
 
 	return false;
 }
 
-static void skl_compute_wm_global_parameters(struct drm_device *dev,
-					     struct intel_wm_config *config)
-{
-	struct drm_crtc *crtc;
-	struct drm_plane *plane;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		config->num_pipes_active += to_intel_crtc(crtc)->active;
-
-	/* FIXME: I don't think we need those two global parameters on SKL */
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		struct intel_plane *intel_plane = to_intel_plane(plane);
-
-		config->sprites_enabled |= intel_plane->wm.enabled;
-		config->sprites_scaled |= intel_plane->wm.scaled;
-	}
-}
-
-static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
-					   struct skl_pipe_wm_parameters *p)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	struct drm_plane *plane;
-	struct drm_framebuffer *fb;
-	int i = 1; /* Index for sprite planes start */
-
-	p->active = intel_crtc->active;
-	if (p->active) {
-		p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
-		p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
-
-		fb = crtc->primary->state->fb;
-		/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
-		if (fb) {
-			p->plane[0].enabled = true;
-			p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
-				drm_format_plane_cpp(fb->pixel_format, 1) :
-				drm_format_plane_cpp(fb->pixel_format, 0);
-			p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
-				drm_format_plane_cpp(fb->pixel_format, 0) : 0;
-			p->plane[0].tiling = fb->modifier[0];
-		} else {
-			p->plane[0].enabled = false;
-			p->plane[0].bytes_per_pixel = 0;
-			p->plane[0].y_bytes_per_pixel = 0;
-			p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
-		}
-		p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
-		p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
-		p->plane[0].rotation = crtc->primary->state->rotation;
-
-		fb = crtc->cursor->state->fb;
-		p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
-		if (fb) {
-			p->plane[PLANE_CURSOR].enabled = true;
-			p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
-			p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
-			p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
-		} else {
-			p->plane[PLANE_CURSOR].enabled = false;
-			p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
-			p->plane[PLANE_CURSOR].horiz_pixels = 64;
-			p->plane[PLANE_CURSOR].vert_pixels = 64;
-		}
-	}
-
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		struct intel_plane *intel_plane = to_intel_plane(plane);
-
-		if (intel_plane->pipe == pipe &&
-		    plane->type == DRM_PLANE_TYPE_OVERLAY)
-			p->plane[i++] = intel_plane->wm;
-	}
-}
-
 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-				 struct skl_pipe_wm_parameters *p,
-				 struct intel_plane_wm_parameters *p_params,
+				 struct intel_crtc_state *cstate,
+				 struct intel_plane *intel_plane,
 				 uint16_t ddb_allocation,
 				 int level,
 				 uint16_t *out_blocks, /* out */
 				 uint8_t *out_lines /* out */)
 {
+	struct drm_plane *plane = &intel_plane->base;
+	struct drm_framebuffer *fb = plane->state->fb;
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint32_t method1, method2;
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3146,31 +3100,33 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	uint32_t selected_result;
 	uint8_t bytes_per_pixel;
 
-	if (latency == 0 || !p->active || !p_params->enabled)
+	if (latency == 0 || !cstate->base.active || !fb)
 		return false;
 
-	bytes_per_pixel = p_params->y_bytes_per_pixel ?
-		p_params->y_bytes_per_pixel :
-		p_params->bytes_per_pixel;
-	method1 = skl_wm_method1(p->pixel_rate,
+	bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
+	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
 				 bytes_per_pixel,
 				 latency);
-	method2 = skl_wm_method2(p->pixel_rate,
-				 p->pipe_htotal,
-				 p_params->horiz_pixels,
+	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
+				 cstate->base.adjusted_mode.crtc_htotal,
+				 cstate->pipe_src_w,
 				 bytes_per_pixel,
-				 p_params->tiling,
+				 fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
+	plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
-	if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
-	    p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
+	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
 		uint32_t min_scanlines = 4;
 		uint32_t y_tile_minimum;
-		if (intel_rotation_90_or_270(p_params->rotation)) {
-			switch (p_params->bytes_per_pixel) {
+		if (intel_rotation_90_or_270(plane->state->rotation)) {
+			int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+				drm_format_plane_cpp(fb->pixel_format, 1) :
+				drm_format_plane_cpp(fb->pixel_format, 0);
+
+			switch (bpp) {
 			case 1:
 				min_scanlines = 16;
 				break;
@@ -3194,8 +3150,8 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
 
 	if (level >= 1 && level <= 7) {
-		if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
-		    p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
+		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
 			res_lines += 4;
 		else
 			res_blocks++;
@@ -3212,84 +3168,80 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 
 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 struct skl_ddb_allocation *ddb,
-				 struct skl_pipe_wm_parameters *p,
-				 enum pipe pipe,
+				 struct intel_crtc_state *cstate,
 				 int level,
-				 int num_planes,
 				 struct skl_wm_level *result)
 {
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_plane *intel_plane;
 	uint16_t ddb_blocks;
-	int i;
+	enum pipe pipe = intel_crtc->pipe;
+
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		int i = skl_wm_plane_id(intel_plane);
 
-	for (i = 0; i < num_planes; i++) {
 		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
 		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
-						p, &p->plane[i],
+						cstate,
+						intel_plane,
 						ddb_blocks,
 						level,
 						&result->plane_res_b[i],
 						&result->plane_res_l[i]);
 	}
-
-	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
-	result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
-						 &p->plane[PLANE_CURSOR],
-						 ddb_blocks, level,
-						 &result->plane_res_b[PLANE_CURSOR],
-						 &result->plane_res_l[PLANE_CURSOR]);
 }
 
 static uint32_t
-skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
+skl_compute_linetime_wm(struct intel_crtc_state *cstate)
 {
-	if (!to_intel_crtc(crtc)->active)
+	if (!cstate->base.active)
 		return 0;
 
-	if (WARN_ON(p->pixel_rate == 0))
+	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
 		return 0;
 
-	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
+			    skl_pipe_pixel_rate(cstate));
 }
 
-static void skl_compute_transition_wm(struct drm_crtc *crtc,
-				      struct skl_pipe_wm_parameters *params,
+static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
 				      struct skl_wm_level *trans_wm /* out */)
 {
+	struct drm_crtc *crtc = cstate->base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int i;
+	struct intel_plane *intel_plane;
 
-	if (!params->active)
+	if (!cstate->base.active)
 		return;
 
 	/* Until we know more, just disable transition WMs */
-	for (i = 0; i < intel_num_planes(intel_crtc); i++)
+	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
+		int i = skl_wm_plane_id(intel_plane);
+
 		trans_wm->plane_en[i] = false;
-	trans_wm->plane_en[PLANE_CURSOR] = false;
+	}
 }
 
-static void skl_compute_pipe_wm(struct drm_crtc *crtc,
+static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
 				struct skl_ddb_allocation *ddb,
-				struct skl_pipe_wm_parameters *params,
 				struct skl_pipe_wm *pipe_wm)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = cstate->base.crtc->dev;
 	const struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int level, max_level = ilk_wm_max_level(dev);
 
 	for (level = 0; level <= max_level; level++) {
-		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
-				     level, intel_num_planes(intel_crtc),
-				     &pipe_wm->wm[level]);
+		skl_compute_wm_level(dev_priv, ddb, cstate,
+				     level, &pipe_wm->wm[level]);
 	}
-	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
+	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
-	skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
+	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
 }
 
 static void skl_compute_wm_results(struct drm_device *dev,
-				   struct skl_pipe_wm_parameters *p,
 				   struct skl_pipe_wm *p_wm,
 				   struct skl_wm_values *r,
 				   struct intel_crtc *intel_crtc)
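The linetime watermark in this hunk is just the scanline period expressed in 1/8 microsecond units: htotal divided by the pixel rate in kHz gives the line time in microseconds once scaled by 1000, and the factor of 8 converts to eighths. A worked example (editor's arithmetic, using a common 1080p60 CEA timing as the assumed input):

/* crtc_htotal = 2200 pixels, pixel rate = 148500 kHz:
 *   line time = 2200 / 148500000 s ~= 14.81 us
 *   DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119  ->  119 / 8 = 14.875 us
 * Rounding up guarantees the programmed value never undershoots the
 * real line time.
 */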
@@ -3346,7 +3298,8 @@ static void skl_compute_wm_results(struct drm_device *dev,
 	r->wm_linetime[pipe] = p_wm->linetime;
 }
 
-static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
+static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
+				i915_reg_t reg,
 				const struct skl_ddb_entry *entry)
 {
 	if (entry->end)
@@ -3533,28 +3486,25 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
 }
 
 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
-			       struct skl_pipe_wm_parameters *params,
-			       struct intel_wm_config *config,
 			       struct skl_ddb_allocation *ddb, /* out */
 			       struct skl_pipe_wm *pipe_wm /* out */)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
-	skl_compute_wm_pipe_parameters(crtc, params);
-	skl_allocate_pipe_ddb(crtc, config, params, ddb);
-	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
+	skl_allocate_pipe_ddb(cstate, ddb);
+	skl_compute_pipe_wm(cstate, ddb, pipe_wm);
 
-	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
 		return false;
 
-	intel_crtc->wm.skl_active = *pipe_wm;
+	intel_crtc->wm.active.skl = *pipe_wm;
 
 	return true;
 }
 
 static void skl_update_other_pipe_wm(struct drm_device *dev,
 				     struct drm_crtc *crtc,
-				     struct intel_wm_config *config,
 				     struct skl_wm_values *r)
 {
 	struct intel_crtc *intel_crtc;
@@ -3575,7 +3525,6 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
 	 */
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
 			    base.head) {
-		struct skl_pipe_wm_parameters params = {};
 		struct skl_pipe_wm pipe_wm = {};
 		bool wm_changed;
 
@@ -3586,7 +3535,6 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
 			continue;
 
 		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
-						&params, config,
 						&r->ddb, &pipe_wm);
 
 		/*
@@ -3596,7 +3544,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
 		 */
 		WARN_ON(!wm_changed);
 
-		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+		skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
 		r->dirty[intel_crtc->pipe] = true;
 	}
 }
@@ -3626,10 +3574,9 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct skl_pipe_wm_parameters params = {};
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
-	struct skl_pipe_wm pipe_wm = {};
-	struct intel_wm_config config = {};
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
 
 
 	/* Clear all dirty flags */
@@ -3637,16 +3584,13 @@ static void skl_update_wm(struct drm_crtc *crtc)
 
 	skl_clear_wm(results, intel_crtc->pipe);
 
-	skl_compute_wm_global_parameters(dev, &config);
-
-	if (!skl_update_pipe_wm(crtc, &params, &config,
-				&results->ddb, &pipe_wm))
+	if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
 		return;
 
-	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+	skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
 	results->dirty[intel_crtc->pipe] = true;
 
-	skl_update_other_pipe_wm(dev, crtc, &config, results);
+	skl_update_other_pipe_wm(dev, crtc, results);
 	skl_write_wm_values(dev_priv, results);
 	skl_flush_wm_values(dev_priv, results);
 
@@ -3654,71 +3598,23 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	dev_priv->wm.skl_hw = *results;
 }
 
-static void
-skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
-		     uint32_t sprite_width, uint32_t sprite_height,
-		     int pixel_size, bool enabled, bool scaled)
-{
-	struct intel_plane *intel_plane = to_intel_plane(plane);
-	struct drm_framebuffer *fb = plane->state->fb;
-
-	intel_plane->wm.enabled = enabled;
-	intel_plane->wm.scaled = scaled;
-	intel_plane->wm.horiz_pixels = sprite_width;
-	intel_plane->wm.vert_pixels = sprite_height;
-	intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
-
-	/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
-	intel_plane->wm.bytes_per_pixel =
-		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
-		drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
-	intel_plane->wm.y_bytes_per_pixel =
-		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
-		drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
-
-	/*
-	 * Framebuffer can be NULL on plane disable, but it does not
-	 * matter for watermarks if we assume no tiling in that case.
-	 */
-	if (fb)
-		intel_plane->wm.tiling = fb->modifier[0];
-	intel_plane->wm.rotation = plane->state->rotation;
-
-	skl_update_wm(crtc);
-}
-
-static void ilk_update_wm(struct drm_crtc *crtc)
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
+	struct intel_wm_config *config = &dev_priv->wm.config;
 	struct ilk_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
-	struct intel_pipe_wm pipe_wm = {};
-	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
-	struct intel_wm_config config = {};
-
-	WARN_ON(cstate->base.active != intel_crtc->active);
-
-	intel_compute_pipe_wm(cstate, &pipe_wm);
-
-	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
-		return;
-
-	intel_crtc->wm.active = pipe_wm;
-
-	ilk_compute_wm_config(dev, &config);
 
-	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
-	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+	ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
 
 	/* 5/6 split only in single pipe config on IVB+ */
 	if (INTEL_INFO(dev)->gen >= 7 &&
-	    config.num_pipes_active == 1 && config.sprites_enabled) {
-		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
-		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+	    config->num_pipes_active == 1 && config->sprites_enabled) {
+		ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
 
 		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
@@ -3733,14 +3629,13 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 	ilk_write_wm_values(dev_priv, &results);
 }
 
-static void
-ilk_update_sprite_wm(struct drm_plane *plane,
-		     struct drm_crtc *crtc,
-		     uint32_t sprite_width, uint32_t sprite_height,
-		     int pixel_size, bool enabled, bool scaled)
+static void ilk_update_wm(struct drm_crtc *crtc)
 {
-	struct drm_device *dev = plane->dev;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+
+	WARN_ON(cstate->base.active != intel_crtc->active);
 
 	/*
 	 * IVB workaround: must disable low power watermarks for at least
@@ -3749,10 +3644,14 @@ ilk_update_sprite_wm(struct drm_plane *plane,
 	 *
 	 * WaCxSRDisabledForSpriteScaling:ivb
 	 */
-	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
-		intel_wait_for_vblank(dev, intel_plane->pipe);
+	if (cstate->disable_lp_wm) {
+		ilk_disable_lp_wm(crtc->dev);
+		intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
+	}
 
-	ilk_update_wm(crtc);
+	intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+
+	ilk_program_watermarks(dev_priv);
 }
 
 static void skl_pipe_wm_active_state(uint32_t val,
@@ -3805,7 +3704,8 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
 	enum pipe pipe = intel_crtc->pipe;
 	int level, i, max_level;
 	uint32_t temp;
@@ -3849,6 +3749,8 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
 	temp = hw->plane_trans[pipe][PLANE_CURSOR];
 	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+
+	intel_crtc->wm.active.skl = *active;
 }
 
 void skl_wm_get_hw_state(struct drm_device *dev)
@@ -3868,9 +3770,10 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_pipe_wm *active = &intel_crtc->wm.active;
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
 	enum pipe pipe = intel_crtc->pipe;
-	static const unsigned int wm0_pipe_reg[] = {
+	static const i915_reg_t wm0_pipe_reg[] = {
 		[PIPE_A] = WM0_PIPEA_ILK,
 		[PIPE_B] = WM0_PIPEB_ILK,
 		[PIPE_C] = WM0_PIPEC_IVB,
@@ -3907,6 +3810,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 		for (level = 0; level <= max_level; level++)
 			active->wm[level].enable = true;
 	}
+
+	intel_crtc->wm.active.ilk = *active;
 }
 
 #define _FW_WM(value, plane) \
@@ -4132,21 +4037,6 @@ void intel_update_watermarks(struct drm_crtc *crtc)
 		dev_priv->display.update_wm(crtc);
 }
 
-void intel_update_sprite_watermarks(struct drm_plane *plane,
-				    struct drm_crtc *crtc,
-				    uint32_t sprite_width,
-				    uint32_t sprite_height,
-				    int pixel_size,
-				    bool enabled, bool scaled)
-{
-	struct drm_i915_private *dev_priv = plane->dev->dev_private;
-
-	if (dev_priv->display.update_sprite_wm)
-		dev_priv->display.update_sprite_wm(plane, crtc,
-						   sprite_width, sprite_height,
-						   pixel_size, enabled, scaled);
-}
-
 /**
  * Lock protecting IPS related data structures
  */
@@ -4414,7 +4304,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-	if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		return;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4449,7 +4339,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 	POSTING_READ(GEN6_RPNSWREQ);
 
 	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(val * 50);
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
 static void valleyview_set_rps(struct drm_device *dev, u8 val)
@@ -4689,7 +4579,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
 	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+	    IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		ret = sandybridge_pcode_read(dev_priv,
 					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
 					&ddcc_status);
@@ -4701,7 +4592,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
 					dev_priv->rps.max_freq);
 	}
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		/* Store the frequency values in 16.66 MHZ units, which is
 		   the natural hardware unit for SKL */
 		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4738,7 +4629,7 @@ static void gen9_enable_rps(struct drm_device *dev)
 	gen6_init_rps_frequencies(dev);
 
 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-	if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 		return;
 	}
@@ -4783,7 +4674,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 
 	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
 	if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
-				 (INTEL_REVID(dev) <= SKL_REVID_E0)))
+				 IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
 	else
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4807,8 +4698,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
 			"on" : "off");
 	/* WaRsUseTimeoutMode */
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
 			   GEN7_RC_CTL_TO_MODE |
@@ -4824,8 +4715,9 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
 	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 	 */
-	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
+	     IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
 		I915_WRITE(GEN9_PG_ENABLE, 0);
 	else
 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -5056,7 +4948,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
 		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5074,7 +4966,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
 		int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
-		if (IS_SKYLAKE(dev)) {
+		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 			/*
 			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
 			 * No floor required for ring frequency on SKL.
@@ -6202,7 +6094,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 	} else if (INTEL_INFO(dev)->gen >= 9) {
 		gen9_enable_rc6(dev);
 		gen9_enable_rps(dev);
-		if (IS_SKYLAKE(dev))
+		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 			__gen6_update_ring_freq(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
@@ -7058,7 +6950,6 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.init_clock_gating =
 				bxt_init_clock_gating;
 		dev_priv->display.update_wm = skl_update_wm;
-		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		ilk_setup_wm_latency(dev);
 
@@ -7067,7 +6958,7 @@ void intel_init_pm(struct drm_device *dev)
 		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
 			dev_priv->display.update_wm = ilk_update_wm;
-			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
 		} else {
 			DRM_DEBUG_KMS("Failed to read display plane latency. "
 				      "Disable CxSR\n");
@@ -7255,7 +7146,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_gpu_freq(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
@@ -7267,13 +7159,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_freq_opcode(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
 		return byt_freq_opcode(dev_priv, val);
 	else
-		return val / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 }
 
 struct request_boost {
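On GEN9 the RPS opcode is in 16.666 MHz (50/3 MHz) units, so both conversions multiply by one constant and divide by the other; switching to DIV_ROUND_CLOSEST keeps MHz-to-opcode-to-MHz round trips stable where plain truncation could drift by a step. A worked example (editor's arithmetic):

/* opcode 7 on GEN9, GT_FREQUENCY_MULTIPLIER = 50, GEN9_FREQ_SCALER = 3:
 *   truncating:  7 * 50 / 3 = 116 MHz;  116 * 3 / 50 = 6   (lost a step)
 *   closest:     DIV_ROUND_CLOSEST(7 * 50, 3)   = 117 MHz
 *                DIV_ROUND_CLOSEST(117 * 3, 50) = 7         (stable)
 */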
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 213581c215b3..b6609e648f75 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -80,7 +80,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
+	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	uint32_t *data = (uint32_t *) vsc_psr;
 	unsigned int i;
 
@@ -151,13 +151,31 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
 			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 }
 
+static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				  enum port port)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return DP_AUX_CH_CTL(port);
+	else
+		return EDP_PSR_AUX_CTL;
+}
+
+static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+				   enum port port, int index)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return DP_AUX_CH_DATA(port, index);
+	else
+		return EDP_PSR_AUX_DATA(index);
+}
+
 static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t aux_clock_divider;
-	uint32_t aux_data_reg, aux_ctl_reg;
+	i915_reg_t aux_ctl_reg;
 	int precharge = 0x3;
 	static const uint8_t aux_msg[] = {
 		[0] = DP_AUX_NATIVE_WRITE << 4,
@@ -166,29 +184,24 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		[3] = 1 - 1,
 		[4] = DP_SET_POWER_D0,
 	};
+	enum port port = dig_port->port;
 	int i;
 
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-
 	/* Enable AUX frame sync at sink */
 	if (dev_priv->psr.aux_frame_sync)
 		drm_dp_dpcd_writeb(&intel_dp->aux,
 				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
 				DP_AUX_FRAME_SYNC_ENABLE);
 
-	aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
-				DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
-	aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
-				DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
+	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 
 	/* Setup AUX registers */
 	for (i = 0; i < sizeof(aux_msg); i += 4)
-		I915_WRITE(aux_data_reg + i,
+		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
 			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
 	if (INTEL_INFO(dev)->gen >= 9) {
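The write loop above feeds each 32-bit AUX data register four message bytes at a time, with i >> 2 selecting the register index. A sketch of the packing behaviour (editor's illustration of how intel_dp_pack_aux() is used here; the MSB-first layout is assumed from its use elsewhere in the driver):

static uint32_t pack_aux(const uint8_t *src, int send_bytes)
{
	uint32_t v = 0;
	int i;

	if (send_bytes > 4)
		send_bytes = 4;
	for (i = 0; i < send_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);	/* MSB first */

	return v;
}

So the 5-byte aux_msg occupies two registers: bytes 0-3 in the first, byte 4 in the top octet of the second.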
@@ -267,16 +280,11 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
 	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
-		/* It doesn't mean we shouldn't send TPS patters, so let's
-		   send the minimal TP1 possible and skip TP2. */
-		val |= EDP_PSR_TP1_TIME_100us;
-		val |= EDP_PSR_TP2_TP3_TIME_0us;
-		val |= EDP_PSR_SKIP_AUX_EXIT;
 		/* Sink should be able to train with the 5 or 6 idle patterns */
 		idle_frames += 4;
 	}
 
-	I915_WRITE(EDP_PSR_CTL(dev), val |
+	I915_WRITE(EDP_PSR_CTL, val |
 		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -340,7 +348,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	WARN_ON(dev_priv->psr.active);
 	lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -403,9 +411,14 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 		skl_psr_setup_su_vsc(intel_dp);
 	}
 
-	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-		   EDP_PSR_DEBUG_MASK_HPD);
+	/*
+	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
+	 * Also mask LPSP to avoid dependency on other drivers that
+	 * might block runtime_pm besides preventing other hw tracking
+	 * issues now we can rely on frontbuffer tracking.
+	 */
+	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 	/* Enable PSR on the panel */
 	hsw_psr_enable_sink(intel_dp);
@@ -427,6 +440,19 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 		vlv_psr_enable_source(intel_dp);
 	}
 
+	/*
+	 * FIXME: Activation should happen immediately since this function
+	 * is just called after pipe is fully trained and enabled.
+	 * However on every platform we face issues when first activation
+	 * follows a modeset so quickly.
+	 *     - On VLV/CHV we get bank screen on first activation
+	 *     - On HSW/BDW we get a recoverable frozen screen until next
+	 *       exit-activate sequence.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+
 	dev_priv->psr.enabled = intel_dp;
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -466,17 +492,17 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->psr.active) {
-		I915_WRITE(EDP_PSR_CTL(dev),
-			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL,
+			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
 		/* Wait till PSR is idle */
-		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
 		dev_priv->psr.active = false;
 	} else {
-		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	}
 }
 
@@ -498,11 +524,15 @@ void intel_psr_disable(struct intel_dp *intel_dp)
 		return;
 	}
 
+	/* Disable PSR on Source */
 	if (HAS_DDI(dev))
 		hsw_psr_disable(intel_dp);
 	else
 		vlv_psr_disable(intel_dp);
 
+	/* Disable PSR on Sink */
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
 
@@ -523,7 +553,7 @@ static void intel_psr_work(struct work_struct *work)
 	 * and be ready for re-enable.
 	 */
 	if (HAS_DDI(dev_priv->dev)) {
-		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
 			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
 			return;
@@ -566,11 +596,11 @@ static void intel_psr_exit(struct drm_device *dev)
 		return;
 
 	if (HAS_DDI(dev)) {
-		val = I915_READ(EDP_PSR_CTL(dev));
+		val = I915_READ(EDP_PSR_CTL);
 
 		WARN_ON(!(val & EDP_PSR_ENABLE));
 
-		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
 	} else {
 		val = I915_READ(VLV_PSRCTL(pipe));
 
@@ -700,7 +730,6 @@ void intel_psr_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
-	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
@@ -714,29 +743,14 @@ void intel_psr_flush(struct drm_device *dev,
714 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 743 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
715 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; 744 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
716 745
717 if (HAS_DDI(dev)) { 746 /* By definition flush = invalidate + flush */
718 /* 747 if (frontbuffer_bits)
719 * By definition every flush should mean invalidate + flush, 748 intel_psr_exit(dev);
720 * however on core platforms let's minimize the
721 * disable/re-enable so we can avoid the invalidate when flip
722 * originated the flush.
723 */
724 if (frontbuffer_bits && origin != ORIGIN_FLIP)
725 intel_psr_exit(dev);
726 } else {
727 /*
728 * On Valleyview and Cherryview we don't use hardware tracking
729 * so any plane updates or cursor moves don't result in a PSR
730 * invalidating. Which means we need to manually fake this in
731 * software for all flushes.
732 */
733 if (frontbuffer_bits)
734 intel_psr_exit(dev);
735 }
736 749
737 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) 750 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
738 schedule_delayed_work(&dev_priv->psr.work, 751 if (!work_busy(&dev_priv->psr.work.work))
739 msecs_to_jiffies(delay_ms)); 752 schedule_delayed_work(&dev_priv->psr.work,
753 msecs_to_jiffies(100));
740 mutex_unlock(&dev_priv->psr.lock); 754 mutex_unlock(&dev_priv->psr.lock);
741} 755}
742 756
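
The re-arm above is now also guarded with work_busy(), which returns a nonzero WORK_BUSY_PENDING/WORK_BUSY_RUNNING mask when the item is already queued or executing, so a storm of flushes cannot keep pushing the delayed work out. The guard pattern in isolation (illustrative names):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void rearm_fn(struct work_struct *work) { /* ... */ }
    static DECLARE_DELAYED_WORK(rearm_work, rearm_fn);

    static void maybe_rearm(void)
    {
            /* skip the schedule if the work is already pending or running */
            if (!work_busy(&rearm_work.work))
                    schedule_delayed_work(&rearm_work, msecs_to_jiffies(100));
    }
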
@@ -751,6 +765,9 @@ void intel_psr_init(struct drm_device *dev)
751{ 765{
752 struct drm_i915_private *dev_priv = dev->dev_private; 766 struct drm_i915_private *dev_priv = dev->dev_private;
753 767
768 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
769 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
770
754 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); 771 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
755 mutex_init(&dev_priv->psr.lock); 772 mutex_init(&dev_priv->psr.lock);
756} 773}
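
The new psr_mmio_base lets the PSR register macros be defined once, relative to a per-platform base, rather than taking a dev argument. Roughly (offsets below are placeholders; the real definitions live in i915_reg.h and rely on an in-scope dev_priv):

    #define EDP_PSR_CTL         _MMIO(dev_priv->psr_mmio_base + 0x00)
    #define EDP_PSR_STATUS_CTL  _MMIO(dev_priv->psr_mmio_base + 0x40)

This is why the calls in the hunks above drop the (dev) parameter from EDP_PSR_CTL and friends.
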
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9461a238f5d5..57d78f264b53 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -481,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
481{ 481{
482 struct drm_device *dev = ring->dev; 482 struct drm_device *dev = ring->dev;
483 struct drm_i915_private *dev_priv = ring->dev->dev_private; 483 struct drm_i915_private *dev_priv = ring->dev->dev_private;
484 u32 mmio = 0; 484 i915_reg_t mmio;
485 485
486 /* The ring status page addresses are no longer next to the rest of 486 /* The ring status page addresses are no longer next to the rest of
487 * the ring registers as of gen7. 487 * the ring registers as of gen7.
@@ -524,7 +524,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
524 * invalidating the TLB? 524 * invalidating the TLB?
525 */ 525 */
526 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 526 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
527 u32 reg = RING_INSTPM(ring->mmio_base); 527 i915_reg_t reg = RING_INSTPM(ring->mmio_base);
528 528
529 /* ring should be idle before issuing a sync flush*/ 529 /* ring should be idle before issuing a sync flush*/
530 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 530 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
@@ -733,7 +733,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
733 733
734 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); 734 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
735 for (i = 0; i < w->count; i++) { 735 for (i = 0; i < w->count; i++) {
736 intel_ring_emit(ring, w->reg[i].addr); 736 intel_ring_emit_reg(ring, w->reg[i].addr);
737 intel_ring_emit(ring, w->reg[i].value); 737 intel_ring_emit(ring, w->reg[i].value);
738 } 738 }
739 intel_ring_emit(ring, MI_NOOP); 739 intel_ring_emit(ring, MI_NOOP);
@@ -766,7 +766,8 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
766} 766}
767 767
768static int wa_add(struct drm_i915_private *dev_priv, 768static int wa_add(struct drm_i915_private *dev_priv,
769 const u32 addr, const u32 mask, const u32 val) 769 i915_reg_t addr,
770 const u32 mask, const u32 val)
770{ 771{
771 const u32 idx = dev_priv->workarounds.count; 772 const u32 idx = dev_priv->workarounds.count;
772 773
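
The u32 → i915_reg_t conversions throughout this file come from the register-type cleanup. In rough outline (approximating i915_reg.h of this era) the type is a one-member struct, so raw offsets and register handles can no longer be mixed silently:

    typedef struct {
            uint32_t reg;
    } i915_reg_t;

    #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
    #define INVALID_MMIO_REG _MMIO(0)

    static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
    {
            return reg.reg;
    }

    static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
    {
            return a.reg == b.reg;
    }

    static inline bool i915_mmio_reg_valid(i915_reg_t reg)
    {
            return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
    }
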
@@ -924,17 +925,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
924 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 925 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
925 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 926 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
926 927
927 if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 || 928 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
928 INTEL_REVID(dev) == SKL_REVID_B0)) || 929 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
929 (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { 930 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
930 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
931 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 931 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
932 GEN9_DG_MIRROR_FIX_ENABLE); 932 GEN9_DG_MIRROR_FIX_ENABLE);
933 }
934 933
935 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || 934 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
936 (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { 935 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
937 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 936 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
938 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 937 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
939 GEN9_RHWO_OPTIMIZATION_DISABLE); 938 GEN9_RHWO_OPTIMIZATION_DISABLE);
940 /* 939 /*
@@ -944,12 +943,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
944 */ 943 */
945 } 944 }
946 945
947 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) || 946 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
948 IS_BROXTON(dev)) { 947 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
949 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
950 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, 948 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
951 GEN9_ENABLE_YV12_BUGFIX); 949 GEN9_ENABLE_YV12_BUGFIX);
952 }
953 950
954 /* Wa4x4STCOptimizationDisable:skl,bxt */ 951 /* Wa4x4STCOptimizationDisable:skl,bxt */
955 /* WaDisablePartialResolveInVc:skl,bxt */ 952 /* WaDisablePartialResolveInVc:skl,bxt */
@@ -961,24 +958,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
961 GEN9_CCS_TLB_PREFETCH_ENABLE); 958 GEN9_CCS_TLB_PREFETCH_ENABLE);
962 959
963 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 960 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
964 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) || 961 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
965 (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) 962 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
966 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 963 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
967 PIXEL_MASK_CAMMING_DISABLE); 964 PIXEL_MASK_CAMMING_DISABLE);
968 965
969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 966 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 967 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
971 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) || 968 if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
972 (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0)) 969 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 970 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 971 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
975 972
976 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ 973 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
977 if (IS_SKYLAKE(dev) || 974 if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
978 (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
979 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 975 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
980 GEN8_SAMPLER_POWER_BYPASS_DIS); 976 GEN8_SAMPLER_POWER_BYPASS_DIS);
981 }
982 977
983 /* WaDisableSTUnitPowerOptimization:skl,bxt */ 978 /* WaDisableSTUnitPowerOptimization:skl,bxt */
984 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); 979 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
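
The open-coded revision checks above are being replaced by range helpers; their shape (per i915_drv.h of this era) is:

    #define REVID_FOREVER 0xff

    /* true when the device's revision id falls in [since, until] */
    #define IS_REVID(p, since, until) \
            (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

    #define IS_SKL_REVID(p, since, until) \
            (IS_SKYLAKE(p) && IS_REVID(p, since, until))

    #define IS_BXT_REVID(p, since, until) \
            (IS_BROXTON(p) && IS_REVID(p, since, until))

So IS_SKL_REVID(dev, 0, SKL_REVID_B0) reads as "Skylake, any revision up to and including B0".
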
@@ -1038,7 +1033,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1038 if (ret) 1033 if (ret)
1039 return ret; 1034 return ret;
1040 1035
1041 if (INTEL_REVID(dev) <= SKL_REVID_D0) { 1036 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
1042 /* WaDisableHDCInvalidation:skl */ 1037 /* WaDisableHDCInvalidation:skl */
1043 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 1038 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
1044 BDW_DISABLE_HDC_INVALIDATION); 1039 BDW_DISABLE_HDC_INVALIDATION);
@@ -1051,23 +1046,23 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1051 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1046 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1052 * involving this register should also be added to WA batch as required. 1047 * involving this register should also be added to WA batch as required.
1053 */ 1048 */
1054 if (INTEL_REVID(dev) <= SKL_REVID_E0) 1049 if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
1055 /* WaDisableLSQCROPERFforOCL:skl */ 1050 /* WaDisableLSQCROPERFforOCL:skl */
1056 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1051 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1057 GEN8_LQSC_RO_PERF_DIS); 1052 GEN8_LQSC_RO_PERF_DIS);
1058 1053
1059 /* WaEnableGapsTsvCreditFix:skl */ 1054 /* WaEnableGapsTsvCreditFix:skl */
1060 if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { 1055 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
1061 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1056 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1062 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1057 GEN9_GAPS_TSV_CREDIT_DISABLE));
1063 } 1058 }
1064 1059
1065 /* WaDisablePowerCompilerClockGating:skl */ 1060 /* WaDisablePowerCompilerClockGating:skl */
1066 if (INTEL_REVID(dev) == SKL_REVID_B0) 1061 if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
1067 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1062 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1068 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1063 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1069 1064
1070 if (INTEL_REVID(dev) <= SKL_REVID_D0) { 1065 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
1071 /* 1066 /*
1072 * Use Force Non-Coherent whenever executing a 3D context. This 1067 * Use Force Non-Coherent whenever executing a 3D context. This
1073 * is a workaround for a possible hang in the unlikely event 1068 * is a workaround for a possible hang in the unlikely event
@@ -1078,19 +1073,17 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1078 HDC_FORCE_NON_COHERENT); 1073 HDC_FORCE_NON_COHERENT);
1079 } 1074 }
1080 1075
1081 if (INTEL_REVID(dev) == SKL_REVID_C0 || 1076 /* WaBarrierPerformanceFixDisable:skl */
1082 INTEL_REVID(dev) == SKL_REVID_D0) 1077 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
1083 /* WaBarrierPerformanceFixDisable:skl */
1084 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1078 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1085 HDC_FENCE_DEST_SLM_DISABLE | 1079 HDC_FENCE_DEST_SLM_DISABLE |
1086 HDC_BARRIER_PERFORMANCE_DISABLE); 1080 HDC_BARRIER_PERFORMANCE_DISABLE);
1087 1081
1088 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1082 /* WaDisableSbeCacheDispatchPortSharing:skl */
1089 if (INTEL_REVID(dev) <= SKL_REVID_F0) { 1083 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
1090 WA_SET_BIT_MASKED( 1084 WA_SET_BIT_MASKED(
1091 GEN7_HALF_SLICE_CHICKEN1, 1085 GEN7_HALF_SLICE_CHICKEN1,
1092 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1086 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1093 }
1094 1087
1095 return skl_tune_iz_hashing(ring); 1088 return skl_tune_iz_hashing(ring);
1096} 1089}
@@ -1107,11 +1100,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
1107 1100
1108 /* WaStoreMultiplePTEenable:bxt */ 1101 /* WaStoreMultiplePTEenable:bxt */
1109 /* This is a requirement according to Hardware specification */ 1102 /* This is a requirement according to Hardware specification */
1110 if (INTEL_REVID(dev) == BXT_REVID_A0) 1103 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1111 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1104 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1112 1105
1113 /* WaSetClckGatingDisableMedia:bxt */ 1106 /* WaSetClckGatingDisableMedia:bxt */
1114 if (INTEL_REVID(dev) == BXT_REVID_A0) { 1107 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1115 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1108 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1116 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1109 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1117 } 1110 }
@@ -1121,7 +1114,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
1121 STALL_DOP_GATING_DISABLE); 1114 STALL_DOP_GATING_DISABLE);
1122 1115
1123 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1116 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1124 if (INTEL_REVID(dev) <= BXT_REVID_B0) { 1117 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
1125 WA_SET_BIT_MASKED( 1118 WA_SET_BIT_MASKED(
1126 GEN7_HALF_SLICE_CHICKEN1, 1119 GEN7_HALF_SLICE_CHICKEN1,
1127 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1120 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1319,11 +1312,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
1319 return ret; 1312 return ret;
1320 1313
1321 for_each_ring(useless, dev_priv, i) { 1314 for_each_ring(useless, dev_priv, i) {
1322 u32 mbox_reg = signaller->semaphore.mbox.signal[i]; 1315 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
1323 if (mbox_reg != GEN6_NOSYNC) { 1316
1317 if (i915_mmio_reg_valid(mbox_reg)) {
1324 u32 seqno = i915_gem_request_get_seqno(signaller_req); 1318 u32 seqno = i915_gem_request_get_seqno(signaller_req);
1319
1325 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 1320 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
1326 intel_ring_emit(signaller, mbox_reg); 1321 intel_ring_emit_reg(signaller, mbox_reg);
1327 intel_ring_emit(signaller, seqno); 1322 intel_ring_emit(signaller, seqno);
1328 } 1323 }
1329 } 1324 }
@@ -2004,11 +1999,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
2004 1999
2005void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2000void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2006{ 2001{
2007 iounmap(ringbuf->virtual_start); 2002 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
2003 vunmap(ringbuf->virtual_start);
2004 else
2005 iounmap(ringbuf->virtual_start);
2008 ringbuf->virtual_start = NULL; 2006 ringbuf->virtual_start = NULL;
2009 i915_gem_object_ggtt_unpin(ringbuf->obj); 2007 i915_gem_object_ggtt_unpin(ringbuf->obj);
2010} 2008}
2011 2009
2010static u32 *vmap_obj(struct drm_i915_gem_object *obj)
2011{
2012 struct sg_page_iter sg_iter;
2013 struct page **pages;
2014 void *addr;
2015 int i;
2016
2017 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
2018 if (pages == NULL)
2019 return NULL;
2020
2021 i = 0;
2022 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
2023 pages[i++] = sg_page_iter_page(&sg_iter);
2024
2025 addr = vmap(pages, i, 0, PAGE_KERNEL);
2026 drm_free_large(pages);
2027
2028 return addr;
2029}
2030
2012int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 2031int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2013 struct intel_ringbuffer *ringbuf) 2032 struct intel_ringbuffer *ringbuf)
2014{ 2033{
@@ -2016,21 +2035,39 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2016 struct drm_i915_gem_object *obj = ringbuf->obj; 2035 struct drm_i915_gem_object *obj = ringbuf->obj;
2017 int ret; 2036 int ret;
2018 2037
2019 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 2038 if (HAS_LLC(dev_priv) && !obj->stolen) {
2020 if (ret) 2039 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
2021 return ret; 2040 if (ret)
2041 return ret;
2022 2042
2023 ret = i915_gem_object_set_to_gtt_domain(obj, true); 2043 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2024 if (ret) { 2044 if (ret) {
2025 i915_gem_object_ggtt_unpin(obj); 2045 i915_gem_object_ggtt_unpin(obj);
2026 return ret; 2046 return ret;
2027 } 2047 }
2048
2049 ringbuf->virtual_start = vmap_obj(obj);
2050 if (ringbuf->virtual_start == NULL) {
2051 i915_gem_object_ggtt_unpin(obj);
2052 return -ENOMEM;
2053 }
2054 } else {
2055 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
2056 if (ret)
2057 return ret;
2028 2058
2029 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + 2059 ret = i915_gem_object_set_to_gtt_domain(obj, true);
2030 i915_gem_obj_ggtt_offset(obj), ringbuf->size); 2060 if (ret) {
2031 if (ringbuf->virtual_start == NULL) { 2061 i915_gem_object_ggtt_unpin(obj);
2032 i915_gem_object_ggtt_unpin(obj); 2062 return ret;
2033 return -EINVAL; 2063 }
2064
2065 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
2066 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
2067 if (ringbuf->virtual_start == NULL) {
2068 i915_gem_object_ggtt_unpin(obj);
2069 return -EINVAL;
2070 }
2034 } 2071 }
2035 2072
2036 return 0; 2073 return 0;
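
On LLC platforms the ringbuffer no longer has to sit behind the write-combined GTT aperture, so it is mapped through the CPU cache with vmap() instead of ioremap_wc(). A minimal vmap() round-trip, for reference (pages and nr_pages assumed filled in the way vmap_obj() does above):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *map_pages(struct page **pages, unsigned int nr_pages)
    {
            /* stitch nr_pages discontiguous pages into one kernel VA range */
            return vmap(pages, nr_pages, 0, PAGE_KERNEL);
    }

    static void unmap_pages(void *addr)
    {
            vunmap(addr);   /* must balance every successful vmap() */
    }
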
@@ -2070,10 +2107,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2070 int ret; 2107 int ret;
2071 2108
2072 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2109 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2073 if (ring == NULL) 2110 if (ring == NULL) {
2111 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
2112 engine->name);
2074 return ERR_PTR(-ENOMEM); 2113 return ERR_PTR(-ENOMEM);
2114 }
2075 2115
2076 ring->ring = engine; 2116 ring->ring = engine;
2117 list_add(&ring->link, &engine->buffers);
2077 2118
2078 ring->size = size; 2119 ring->size = size;
2079 /* Workaround an erratum on the i830 which causes a hang if 2120 /* Workaround an erratum on the i830 which causes a hang if
@@ -2089,8 +2130,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2089 2130
2090 ret = intel_alloc_ringbuffer_obj(engine->dev, ring); 2131 ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
2091 if (ret) { 2132 if (ret) {
2092 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", 2133 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2093 engine->name, ret); 2134 engine->name, ret);
2135 list_del(&ring->link);
2094 kfree(ring); 2136 kfree(ring);
2095 return ERR_PTR(ret); 2137 return ERR_PTR(ret);
2096 } 2138 }
@@ -2102,6 +2144,7 @@ void
2102intel_ringbuffer_free(struct intel_ringbuffer *ring) 2144intel_ringbuffer_free(struct intel_ringbuffer *ring)
2103{ 2145{
2104 intel_destroy_ringbuffer_obj(ring); 2146 intel_destroy_ringbuffer_obj(ring);
2147 list_del(&ring->link);
2105 kfree(ring); 2148 kfree(ring);
2106} 2149}
2107 2150
@@ -2117,6 +2160,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2117 INIT_LIST_HEAD(&ring->active_list); 2160 INIT_LIST_HEAD(&ring->active_list);
2118 INIT_LIST_HEAD(&ring->request_list); 2161 INIT_LIST_HEAD(&ring->request_list);
2119 INIT_LIST_HEAD(&ring->execlist_queue); 2162 INIT_LIST_HEAD(&ring->execlist_queue);
2163 INIT_LIST_HEAD(&ring->buffers);
2120 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2164 i915_gem_batch_pool_init(dev, &ring->batch_pool);
2121 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 2165 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
2122 2166
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49fa41dc0eb6..5d1eb206151d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,6 +100,7 @@ struct intel_ringbuffer {
100 void __iomem *virtual_start; 100 void __iomem *virtual_start;
101 101
102 struct intel_engine_cs *ring; 102 struct intel_engine_cs *ring;
103 struct list_head link;
103 104
104 u32 head; 105 u32 head;
105 u32 tail; 106 u32 tail;
@@ -157,6 +158,7 @@ struct intel_engine_cs {
157 u32 mmio_base; 158 u32 mmio_base;
158 struct drm_device *dev; 159 struct drm_device *dev;
159 struct intel_ringbuffer *buffer; 160 struct intel_ringbuffer *buffer;
161 struct list_head buffers;
160 162
161 /* 163 /*
162 * A pool of objects to use as shadow copies of client batch buffers 164 * A pool of objects to use as shadow copies of client batch buffers
@@ -247,7 +249,7 @@ struct intel_engine_cs {
247 /* our mbox written by others */ 249 /* our mbox written by others */
248 u32 wait[I915_NUM_RINGS]; 250 u32 wait[I915_NUM_RINGS];
249 /* mboxes this ring signals to */ 251 /* mboxes this ring signals to */
250 u32 signal[I915_NUM_RINGS]; 252 i915_reg_t signal[I915_NUM_RINGS];
251 } mbox; 253 } mbox;
252 u64 signal_ggtt[I915_NUM_RINGS]; 254 u64 signal_ggtt[I915_NUM_RINGS];
253 }; 255 };
@@ -441,6 +443,11 @@ static inline void intel_ring_emit(struct intel_engine_cs *ring,
441 iowrite32(data, ringbuf->virtual_start + ringbuf->tail); 443 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
442 ringbuf->tail += 4; 444 ringbuf->tail += 4;
443} 445}
446static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
447 i915_reg_t reg)
448{
449 intel_ring_emit(ring, i915_mmio_reg_offset(reg));
450}
444static inline void intel_ring_advance(struct intel_engine_cs *ring) 451static inline void intel_ring_advance(struct intel_engine_cs *ring)
445{ 452{
446 struct intel_ringbuffer *ringbuf = ring->buffer; 453 struct intel_ringbuffer *ringbuf = ring->buffer;
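
A typical caller of the new helper pairs it with MI_LOAD_REGISTER_IMM, as the workaround and semaphore hunks above do; a hypothetical wrapper:

    /* Emit an LRI that writes `value` into `reg`; the register offset is
     * extracted explicitly via i915_mmio_reg_offset() rather than through
     * an implicit u32 conversion. Assumes the caller already reserved
     * space with intel_ring_begin(). */
    static void emit_lri(struct intel_engine_cs *ring, i915_reg_t reg, u32 value)
    {
            intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
            intel_ring_emit_reg(ring, reg);
            intel_ring_emit(ring, value);
    }
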
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d89c1d0aa1b7..2c2151f1c47e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,25 +49,88 @@
49 * present for a given platform. 49 * present for a given platform.
50 */ 50 */
51 51
52#define GEN9_ENABLE_DC5(dev) 0
53#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
54
55#define for_each_power_well(i, power_well, domain_mask, power_domains) \ 52#define for_each_power_well(i, power_well, domain_mask, power_domains) \
56 for (i = 0; \ 53 for (i = 0; \
57 i < (power_domains)->power_well_count && \ 54 i < (power_domains)->power_well_count && \
58 ((power_well) = &(power_domains)->power_wells[i]); \ 55 ((power_well) = &(power_domains)->power_wells[i]); \
59 i++) \ 56 i++) \
60 if ((power_well)->domains & (domain_mask)) 57 for_each_if ((power_well)->domains & (domain_mask))
61 58
62#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ 59#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
63 for (i = (power_domains)->power_well_count - 1; \ 60 for (i = (power_domains)->power_well_count - 1; \
64 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ 61 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
65 i--) \ 62 i--) \
66 if ((power_well)->domains & (domain_mask)) 63 for_each_if ((power_well)->domains & (domain_mask))
67 64
68bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 65bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
69 int power_well_id); 66 int power_well_id);
70 67
68const char *
69intel_display_power_domain_str(enum intel_display_power_domain domain)
70{
71 switch (domain) {
72 case POWER_DOMAIN_PIPE_A:
73 return "PIPE_A";
74 case POWER_DOMAIN_PIPE_B:
75 return "PIPE_B";
76 case POWER_DOMAIN_PIPE_C:
77 return "PIPE_C";
78 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
79 return "PIPE_A_PANEL_FITTER";
80 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
81 return "PIPE_B_PANEL_FITTER";
82 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
83 return "PIPE_C_PANEL_FITTER";
84 case POWER_DOMAIN_TRANSCODER_A:
85 return "TRANSCODER_A";
86 case POWER_DOMAIN_TRANSCODER_B:
87 return "TRANSCODER_B";
88 case POWER_DOMAIN_TRANSCODER_C:
89 return "TRANSCODER_C";
90 case POWER_DOMAIN_TRANSCODER_EDP:
91 return "TRANSCODER_EDP";
92 case POWER_DOMAIN_PORT_DDI_A_LANES:
93 return "PORT_DDI_A_LANES";
94 case POWER_DOMAIN_PORT_DDI_B_LANES:
95 return "PORT_DDI_B_LANES";
96 case POWER_DOMAIN_PORT_DDI_C_LANES:
97 return "PORT_DDI_C_LANES";
98 case POWER_DOMAIN_PORT_DDI_D_LANES:
99 return "PORT_DDI_D_LANES";
100 case POWER_DOMAIN_PORT_DDI_E_LANES:
101 return "PORT_DDI_E_LANES";
102 case POWER_DOMAIN_PORT_DSI:
103 return "PORT_DSI";
104 case POWER_DOMAIN_PORT_CRT:
105 return "PORT_CRT";
106 case POWER_DOMAIN_PORT_OTHER:
107 return "PORT_OTHER";
108 case POWER_DOMAIN_VGA:
109 return "VGA";
110 case POWER_DOMAIN_AUDIO:
111 return "AUDIO";
112 case POWER_DOMAIN_PLLS:
113 return "PLLS";
114 case POWER_DOMAIN_AUX_A:
115 return "AUX_A";
116 case POWER_DOMAIN_AUX_B:
117 return "AUX_B";
118 case POWER_DOMAIN_AUX_C:
119 return "AUX_C";
120 case POWER_DOMAIN_AUX_D:
121 return "AUX_D";
122 case POWER_DOMAIN_GMBUS:
123 return "GMBUS";
124 case POWER_DOMAIN_INIT:
125 return "INIT";
126 case POWER_DOMAIN_MODESET:
127 return "MODESET";
128 default:
129 MISSING_CASE(domain);
130 return "?";
131 }
132}
133
71static void intel_power_well_enable(struct drm_i915_private *dev_priv, 134static void intel_power_well_enable(struct drm_i915_private *dev_priv,
72 struct i915_power_well *power_well) 135 struct i915_power_well *power_well)
73{ 136{
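
for_each_if() is a small helper that makes the trailing condition safe inside iterator macros; its usual definition:

    /* Wrapping the condition this way means an `else` written after the
     * iterator macro can no longer pair with the macro's hidden `if`. */
    #define for_each_if(condition) if (!(condition)) {} else
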
@@ -244,12 +307,6 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
244 gen8_irq_power_well_post_enable(dev_priv, 307 gen8_irq_power_well_post_enable(dev_priv,
245 1 << PIPE_C | 1 << PIPE_B); 308 1 << PIPE_C | 1 << PIPE_B);
246 } 309 }
247
248 if (power_well->data == SKL_DISP_PW_1) {
249 if (!dev_priv->power_domains.initializing)
250 intel_prepare_ddi(dev);
251 gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
252 }
253} 310}
254 311
255static void hsw_set_power_well(struct drm_i915_private *dev_priv, 312static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -292,58 +349,38 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
292 BIT(POWER_DOMAIN_TRANSCODER_C) | \ 349 BIT(POWER_DOMAIN_TRANSCODER_C) | \
293 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 350 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
294 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 351 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
295 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 352 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
296 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 353 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
297 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 354 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
298 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 355 BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
299 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
300 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
301 BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
302 BIT(POWER_DOMAIN_AUX_B) | \ 356 BIT(POWER_DOMAIN_AUX_B) | \
303 BIT(POWER_DOMAIN_AUX_C) | \ 357 BIT(POWER_DOMAIN_AUX_C) | \
304 BIT(POWER_DOMAIN_AUX_D) | \ 358 BIT(POWER_DOMAIN_AUX_D) | \
305 BIT(POWER_DOMAIN_AUDIO) | \ 359 BIT(POWER_DOMAIN_AUDIO) | \
306 BIT(POWER_DOMAIN_VGA) | \ 360 BIT(POWER_DOMAIN_VGA) | \
307 BIT(POWER_DOMAIN_INIT)) 361 BIT(POWER_DOMAIN_INIT))
308#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
309 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
310 BIT(POWER_DOMAIN_PLLS) | \
311 BIT(POWER_DOMAIN_PIPE_A) | \
312 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
313 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
314 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
315 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
316 BIT(POWER_DOMAIN_AUX_A) | \
317 BIT(POWER_DOMAIN_INIT))
318#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \ 362#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
319 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 363 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
320 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 364 BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
321 BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
322 BIT(POWER_DOMAIN_INIT)) 365 BIT(POWER_DOMAIN_INIT))
323#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \ 366#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
324 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 367 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
325 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
326 BIT(POWER_DOMAIN_INIT)) 368 BIT(POWER_DOMAIN_INIT))
327#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \ 369#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
328 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 370 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
329 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
330 BIT(POWER_DOMAIN_INIT)) 371 BIT(POWER_DOMAIN_INIT))
331#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \ 372#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
332 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 373 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
333 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
334 BIT(POWER_DOMAIN_INIT)) 374 BIT(POWER_DOMAIN_INIT))
335#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \ 375#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
336 SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ 376 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
337 BIT(POWER_DOMAIN_PLLS) | \ 377 BIT(POWER_DOMAIN_MODESET) | \
378 BIT(POWER_DOMAIN_AUX_A) | \
338 BIT(POWER_DOMAIN_INIT)) 379 BIT(POWER_DOMAIN_INIT))
339#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ 380#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
340 (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ 381 (POWER_DOMAIN_MASK & ~( \
341 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 382 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
342 SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \ 383 SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \
343 SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
344 SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
345 SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
346 SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
347 BIT(POWER_DOMAIN_INIT)) 384 BIT(POWER_DOMAIN_INIT))
348 385
349#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 386#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
@@ -354,25 +391,28 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
354 BIT(POWER_DOMAIN_TRANSCODER_C) | \ 391 BIT(POWER_DOMAIN_TRANSCODER_C) | \
355 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 392 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
356 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 393 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
357 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 394 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
358 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 395 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
359 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
360 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
361 BIT(POWER_DOMAIN_AUX_B) | \ 396 BIT(POWER_DOMAIN_AUX_B) | \
362 BIT(POWER_DOMAIN_AUX_C) | \ 397 BIT(POWER_DOMAIN_AUX_C) | \
363 BIT(POWER_DOMAIN_AUDIO) | \ 398 BIT(POWER_DOMAIN_AUDIO) | \
364 BIT(POWER_DOMAIN_VGA) | \ 399 BIT(POWER_DOMAIN_VGA) | \
400 BIT(POWER_DOMAIN_GMBUS) | \
365 BIT(POWER_DOMAIN_INIT)) 401 BIT(POWER_DOMAIN_INIT))
366#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ 402#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
367 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 403 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
368 BIT(POWER_DOMAIN_PIPE_A) | \ 404 BIT(POWER_DOMAIN_PIPE_A) | \
369 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 405 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
370 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 406 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
371 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 407 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
372 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
373 BIT(POWER_DOMAIN_AUX_A) | \ 408 BIT(POWER_DOMAIN_AUX_A) | \
374 BIT(POWER_DOMAIN_PLLS) | \ 409 BIT(POWER_DOMAIN_PLLS) | \
375 BIT(POWER_DOMAIN_INIT)) 410 BIT(POWER_DOMAIN_INIT))
411#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
412 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
413 BIT(POWER_DOMAIN_MODESET) | \
414 BIT(POWER_DOMAIN_AUX_A) | \
415 BIT(POWER_DOMAIN_INIT))
376#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ 416#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
377 (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ 417 (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
378 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \ 418 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
@@ -416,46 +456,74 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
416 */ 456 */
417} 457}
418 458
419void bxt_enable_dc9(struct drm_i915_private *dev_priv) 459static void gen9_set_dc_state_debugmask_memory_up(
460 struct drm_i915_private *dev_priv)
420{ 461{
421 uint32_t val; 462 uint32_t val;
422 463
423 assert_can_enable_dc9(dev_priv); 464 /* The below bit doesn't need to be cleared ever afterwards */
465 val = I915_READ(DC_STATE_DEBUG);
466 if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
467 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
468 I915_WRITE(DC_STATE_DEBUG, val);
469 POSTING_READ(DC_STATE_DEBUG);
470 }
471}
424 472
425 DRM_DEBUG_KMS("Enabling DC9\n"); 473static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
474{
475 uint32_t val;
476 uint32_t mask;
477
478 mask = DC_STATE_EN_UPTO_DC5;
479 if (IS_BROXTON(dev_priv))
480 mask |= DC_STATE_EN_DC9;
481 else
482 mask |= DC_STATE_EN_UPTO_DC6;
483
484 WARN_ON_ONCE(state & ~mask);
485
486 if (i915.enable_dc == 0)
487 state = DC_STATE_DISABLE;
488 else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
489 state = DC_STATE_EN_UPTO_DC5;
490
491 if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
492 gen9_set_dc_state_debugmask_memory_up(dev_priv);
426 493
427 val = I915_READ(DC_STATE_EN); 494 val = I915_READ(DC_STATE_EN);
428 val |= DC_STATE_EN_DC9; 495 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
496 val & mask, state);
497 val &= ~mask;
498 val |= state;
429 I915_WRITE(DC_STATE_EN, val); 499 I915_WRITE(DC_STATE_EN, val);
430 POSTING_READ(DC_STATE_EN); 500 POSTING_READ(DC_STATE_EN);
431} 501}
432 502
433void bxt_disable_dc9(struct drm_i915_private *dev_priv) 503void bxt_enable_dc9(struct drm_i915_private *dev_priv)
434{ 504{
435 uint32_t val; 505 assert_can_enable_dc9(dev_priv);
506
507 DRM_DEBUG_KMS("Enabling DC9\n");
508
509 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
510}
436 511
512void bxt_disable_dc9(struct drm_i915_private *dev_priv)
513{
437 assert_can_disable_dc9(dev_priv); 514 assert_can_disable_dc9(dev_priv);
438 515
439 DRM_DEBUG_KMS("Disabling DC9\n"); 516 DRM_DEBUG_KMS("Disabling DC9\n");
440 517
441 val = I915_READ(DC_STATE_EN); 518 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
442 val &= ~DC_STATE_EN_DC9;
443 I915_WRITE(DC_STATE_EN, val);
444 POSTING_READ(DC_STATE_EN);
445} 519}
446 520
447static void gen9_set_dc_state_debugmask_memory_up( 521static void assert_csr_loaded(struct drm_i915_private *dev_priv)
448 struct drm_i915_private *dev_priv)
449{ 522{
450 uint32_t val; 523 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
451 524 "CSR program storage start is NULL\n");
452 /* The below bit doesn't need to be cleared ever afterwards */ 525 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
453 val = I915_READ(DC_STATE_DEBUG); 526 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
454 if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
455 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
456 I915_WRITE(DC_STATE_DEBUG, val);
457 POSTING_READ(DC_STATE_DEBUG);
458 }
459} 527}
460 528
461static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 529static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
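
gen9_set_dc_state() centralizes what the per-state enable/disable functions below used to open-code: a masked read-modify-write of DC_STATE_EN. The underlying pattern, restated as a small helper (driver register macros assumed in scope):

    static void dc_state_rmw(struct drm_i915_private *dev_priv,
                             uint32_t mask, uint32_t state)
    {
            uint32_t val = I915_READ(DC_STATE_EN);

            val &= ~mask;   /* clear only the bits this request owns */
            val |= state;   /* then set the requested DC state bits */
            I915_WRITE(DC_STATE_EN, val);
            POSTING_READ(DC_STATE_EN);  /* flush the write before returning */
    }
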
@@ -478,8 +546,6 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
478 546
479static void assert_can_disable_dc5(struct drm_i915_private *dev_priv) 547static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
480{ 548{
481 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
482 SKL_DISP_PW_2);
483 /* 549 /*
484 * During initialization, the firmware may not be loaded yet. 550 * During initialization, the firmware may not be loaded yet.
485 * We still want to make sure that the DC enabling flag is cleared. 551 * We still want to make sure that the DC enabling flag is cleared.
@@ -487,40 +553,17 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
487 if (dev_priv->power_domains.initializing) 553 if (dev_priv->power_domains.initializing)
488 return; 554 return;
489 555
490 WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
491 WARN_ONCE(dev_priv->pm.suspended, 556 WARN_ONCE(dev_priv->pm.suspended,
492 "Disabling of DC5 while platform is runtime-suspended should never happen.\n"); 557 "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
493} 558}
494 559
495static void gen9_enable_dc5(struct drm_i915_private *dev_priv) 560static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
496{ 561{
497 uint32_t val;
498
499 assert_can_enable_dc5(dev_priv); 562 assert_can_enable_dc5(dev_priv);
500 563
501 DRM_DEBUG_KMS("Enabling DC5\n"); 564 DRM_DEBUG_KMS("Enabling DC5\n");
502 565
503 gen9_set_dc_state_debugmask_memory_up(dev_priv); 566 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
504
505 val = I915_READ(DC_STATE_EN);
506 val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
507 val |= DC_STATE_EN_UPTO_DC5;
508 I915_WRITE(DC_STATE_EN, val);
509 POSTING_READ(DC_STATE_EN);
510}
511
512static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
513{
514 uint32_t val;
515
516 assert_can_disable_dc5(dev_priv);
517
518 DRM_DEBUG_KMS("Disabling DC5\n");
519
520 val = I915_READ(DC_STATE_EN);
521 val &= ~DC_STATE_EN_UPTO_DC5;
522 I915_WRITE(DC_STATE_EN, val);
523 POSTING_READ(DC_STATE_EN);
524} 567}
525 568
526static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 569static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
@@ -546,40 +589,37 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
546 if (dev_priv->power_domains.initializing) 589 if (dev_priv->power_domains.initializing)
547 return; 590 return;
548 591
549 assert_csr_loaded(dev_priv);
550 WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 592 WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
551 "DC6 already programmed to be disabled.\n"); 593 "DC6 already programmed to be disabled.\n");
552} 594}
553 595
554static void skl_enable_dc6(struct drm_i915_private *dev_priv) 596static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
555{ 597{
556 uint32_t val; 598 assert_can_disable_dc5(dev_priv);
599
600 if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
601 assert_can_disable_dc6(dev_priv);
557 602
603 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
604}
605
606void skl_enable_dc6(struct drm_i915_private *dev_priv)
607{
558 assert_can_enable_dc6(dev_priv); 608 assert_can_enable_dc6(dev_priv);
559 609
560 DRM_DEBUG_KMS("Enabling DC6\n"); 610 DRM_DEBUG_KMS("Enabling DC6\n");
561 611
562 gen9_set_dc_state_debugmask_memory_up(dev_priv); 612 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
563 613
564 val = I915_READ(DC_STATE_EN);
565 val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
566 val |= DC_STATE_EN_UPTO_DC6;
567 I915_WRITE(DC_STATE_EN, val);
568 POSTING_READ(DC_STATE_EN);
569} 614}
570 615
571static void skl_disable_dc6(struct drm_i915_private *dev_priv) 616void skl_disable_dc6(struct drm_i915_private *dev_priv)
572{ 617{
573 uint32_t val;
574
575 assert_can_disable_dc6(dev_priv); 618 assert_can_disable_dc6(dev_priv);
576 619
577 DRM_DEBUG_KMS("Disabling DC6\n"); 620 DRM_DEBUG_KMS("Disabling DC6\n");
578 621
579 val = I915_READ(DC_STATE_EN); 622 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
580 val &= ~DC_STATE_EN_UPTO_DC6;
581 I915_WRITE(DC_STATE_EN, val);
582 POSTING_READ(DC_STATE_EN);
583} 623}
584 624
585static void skl_set_power_well(struct drm_i915_private *dev_priv, 625static void skl_set_power_well(struct drm_i915_private *dev_priv,
@@ -629,20 +669,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
629 !I915_READ(HSW_PWR_WELL_BIOS), 669 !I915_READ(HSW_PWR_WELL_BIOS),
630 "Invalid for power well status to be enabled, unless done by the BIOS, \ 670 "Invalid for power well status to be enabled, unless done by the BIOS, \
631 when request is to disable!\n"); 671 when request is to disable!\n");
632 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && 672 if (power_well->data == SKL_DISP_PW_2) {
633 power_well->data == SKL_DISP_PW_2) { 673 /*
634 if (SKL_ENABLE_DC6(dev)) { 674 * DDI buffer programming unnecessary during
635 skl_disable_dc6(dev_priv); 675 * driver-load/resume as it's already done
636 /* 676 * during modeset initialization then. It's
637 * DDI buffer programming unnecessary during driver-load/resume 677 * also invalid here as encoder list is still
638 * as it's already done during modeset initialization then. 678 * uninitialized.
639 * It's also invalid here as encoder list is still uninitialized. 679 */
640 */ 680 if (!dev_priv->power_domains.initializing)
641 if (!dev_priv->power_domains.initializing) 681 intel_prepare_ddi(dev);
642 intel_prepare_ddi(dev);
643 } else {
644 gen9_disable_dc5(dev_priv);
645 }
646 } 682 }
647 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); 683 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
648 } 684 }
@@ -657,34 +693,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
657 } 693 }
658 } else { 694 } else {
659 if (enable_requested) { 695 if (enable_requested) {
660 if (IS_SKYLAKE(dev) && 696 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
661 (power_well->data == SKL_DISP_PW_1) && 697 POSTING_READ(HSW_PWR_WELL_DRIVER);
662 (intel_csr_load_status_get(dev_priv) == FW_LOADED)) 698 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
663 DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
664 else {
665 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
666 POSTING_READ(HSW_PWR_WELL_DRIVER);
667 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
668 }
669
670 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
671 power_well->data == SKL_DISP_PW_2) {
672 enum csr_state state;
673 /* TODO: wait for a completion event or
674 * similar here instead of busy
675 * waiting using wait_for function.
676 */
677 wait_for((state = intel_csr_load_status_get(dev_priv)) !=
678 FW_UNINITIALIZED, 1000);
679 if (state != FW_LOADED)
680 DRM_DEBUG("CSR firmware not ready (%d)\n",
681 state);
682 else
683 if (SKL_ENABLE_DC6(dev))
684 skl_enable_dc6(dev_priv);
685 else
686 gen9_enable_dc5(dev_priv);
687 }
688 } 699 }
689 } 700 }
690 701
@@ -759,6 +770,41 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
759 skl_set_power_well(dev_priv, power_well, false); 770 skl_set_power_well(dev_priv, power_well, false);
760} 771}
761 772
773static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
774 struct i915_power_well *power_well)
775{
776 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
777}
778
779static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
780 struct i915_power_well *power_well)
781{
782 gen9_disable_dc5_dc6(dev_priv);
783}
784
785static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
786 struct i915_power_well *power_well)
787{
788 if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
789 skl_enable_dc6(dev_priv);
790 else
791 gen9_enable_dc5(dev_priv);
792}
793
794static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
795 struct i915_power_well *power_well)
796{
797 if (power_well->count > 0) {
798 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
799 } else {
800 if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
801 i915.enable_dc != 1)
802 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
803 else
804 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
805 }
806}
807
762static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 808static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
763 struct i915_power_well *power_well) 809 struct i915_power_well *power_well)
764{ 810{
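
Note the inverted sense of the new "DC off" well: it counts as enabled when no DC state is currently allowed, so taking a reference blocks DC5/DC6 and dropping the last reference re-arms them. That is all gen9_dc_off_power_well_enabled() above checks (restated here; driver macros assumed in scope):

    /* "enabled" == no DC state currently permitted */
    static bool dc_off_is_enabled(struct drm_i915_private *dev_priv)
    {
            return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
    }
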
@@ -973,10 +1019,12 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
973 int power_well_id) 1019 int power_well_id)
974{ 1020{
975 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1021 struct i915_power_domains *power_domains = &dev_priv->power_domains;
976 struct i915_power_well *power_well;
977 int i; 1022 int i;
978 1023
979 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { 1024 for (i = 0; i < power_domains->power_well_count; i++) {
1025 struct i915_power_well *power_well;
1026
1027 power_well = &power_domains->power_wells[i];
980 if (power_well->data == power_well_id) 1028 if (power_well->data == power_well_id)
981 return power_well; 1029 return power_well;
982 } 1030 }
@@ -1451,13 +1499,17 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1451 1499
1452 mutex_lock(&power_domains->lock); 1500 mutex_lock(&power_domains->lock);
1453 1501
1454 WARN_ON(!power_domains->domain_use_count[domain]); 1502 WARN(!power_domains->domain_use_count[domain],
1503 "Use count on domain %s is already zero\n",
1504 intel_display_power_domain_str(domain));
1455 power_domains->domain_use_count[domain]--; 1505 power_domains->domain_use_count[domain]--;
1456 1506
1457 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 1507 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1458 WARN_ON(!power_well->count); 1508 WARN(!power_well->count,
1509 "Use count on power well %s is already zero",
1510 power_well->name);
1459 1511
1460 if (!--power_well->count && i915.disable_power_well) 1512 if (!--power_well->count)
1461 intel_power_well_disable(dev_priv, power_well); 1513 intel_power_well_disable(dev_priv, power_well);
1462 } 1514 }
1463 1515
@@ -1469,20 +1521,17 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1469#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 1521#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
1470 BIT(POWER_DOMAIN_PIPE_A) | \ 1522 BIT(POWER_DOMAIN_PIPE_A) | \
1471 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 1523 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
1472 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 1524 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
1473 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 1525 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1474 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1526 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1475 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1527 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1476 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1477 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1478 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
1479 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
1480 BIT(POWER_DOMAIN_PORT_CRT) | \ 1528 BIT(POWER_DOMAIN_PORT_CRT) | \
1481 BIT(POWER_DOMAIN_PLLS) | \ 1529 BIT(POWER_DOMAIN_PLLS) | \
1482 BIT(POWER_DOMAIN_AUX_A) | \ 1530 BIT(POWER_DOMAIN_AUX_A) | \
1483 BIT(POWER_DOMAIN_AUX_B) | \ 1531 BIT(POWER_DOMAIN_AUX_B) | \
1484 BIT(POWER_DOMAIN_AUX_C) | \ 1532 BIT(POWER_DOMAIN_AUX_C) | \
1485 BIT(POWER_DOMAIN_AUX_D) | \ 1533 BIT(POWER_DOMAIN_AUX_D) | \
1534 BIT(POWER_DOMAIN_GMBUS) | \
1486 BIT(POWER_DOMAIN_INIT)) 1535 BIT(POWER_DOMAIN_INIT))
1487#define HSW_DISPLAY_POWER_DOMAINS ( \ 1536#define HSW_DISPLAY_POWER_DOMAINS ( \
1488 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ 1537 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -1499,49 +1548,42 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1499#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK 1548#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
1500 1549
1501#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 1550#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
1502 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1551 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1503 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1552 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1504 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1505 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1506 BIT(POWER_DOMAIN_PORT_CRT) | \ 1553 BIT(POWER_DOMAIN_PORT_CRT) | \
1507 BIT(POWER_DOMAIN_AUX_B) | \ 1554 BIT(POWER_DOMAIN_AUX_B) | \
1508 BIT(POWER_DOMAIN_AUX_C) | \ 1555 BIT(POWER_DOMAIN_AUX_C) | \
1509 BIT(POWER_DOMAIN_INIT)) 1556 BIT(POWER_DOMAIN_INIT))
1510 1557
1511#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 1558#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
1512 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1559 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1513 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
1514 BIT(POWER_DOMAIN_AUX_B) | \ 1560 BIT(POWER_DOMAIN_AUX_B) | \
1515 BIT(POWER_DOMAIN_INIT)) 1561 BIT(POWER_DOMAIN_INIT))
1516 1562
1517#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 1563#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
1518 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1564 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1519 BIT(POWER_DOMAIN_AUX_B) | \ 1565 BIT(POWER_DOMAIN_AUX_B) | \
1520 BIT(POWER_DOMAIN_INIT)) 1566 BIT(POWER_DOMAIN_INIT))
1521 1567
1522#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 1568#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
1523 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 1569 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1524 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1525 BIT(POWER_DOMAIN_AUX_C) | \ 1570 BIT(POWER_DOMAIN_AUX_C) | \
1526 BIT(POWER_DOMAIN_INIT)) 1571 BIT(POWER_DOMAIN_INIT))
1527 1572
1528#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 1573#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
1529 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 1574 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1530 BIT(POWER_DOMAIN_AUX_C) | \ 1575 BIT(POWER_DOMAIN_AUX_C) | \
1531 BIT(POWER_DOMAIN_INIT)) 1576 BIT(POWER_DOMAIN_INIT))
1532 1577
1533#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 1578#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
1534 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1579 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1535 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1580 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1536 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1537 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1538 BIT(POWER_DOMAIN_AUX_B) | \ 1581 BIT(POWER_DOMAIN_AUX_B) | \
1539 BIT(POWER_DOMAIN_AUX_C) | \ 1582 BIT(POWER_DOMAIN_AUX_C) | \
1540 BIT(POWER_DOMAIN_INIT)) 1583 BIT(POWER_DOMAIN_INIT))
1541 1584
1542#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 1585#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
1543 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 1586 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1544 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
1545 BIT(POWER_DOMAIN_AUX_D) | \ 1587 BIT(POWER_DOMAIN_AUX_D) | \
1546 BIT(POWER_DOMAIN_INIT)) 1588 BIT(POWER_DOMAIN_INIT))
1547 1589
@@ -1589,6 +1631,13 @@ static const struct i915_power_well_ops skl_power_well_ops = {
1589 .is_enabled = skl_power_well_enabled, 1631 .is_enabled = skl_power_well_enabled,
1590}; 1632};
1591 1633
1634static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1635 .sync_hw = gen9_dc_off_power_well_sync_hw,
1636 .enable = gen9_dc_off_power_well_enable,
1637 .disable = gen9_dc_off_power_well_disable,
1638 .is_enabled = gen9_dc_off_power_well_enabled,
1639};
1640
1592static struct i915_power_well hsw_power_wells[] = { 1641static struct i915_power_well hsw_power_wells[] = {
1593 { 1642 {
1594 .name = "always-on", 1643 .name = "always-on",
@@ -1644,6 +1693,7 @@ static struct i915_power_well vlv_power_wells[] = {
1644 .always_on = 1, 1693 .always_on = 1,
1645 .domains = VLV_ALWAYS_ON_POWER_DOMAINS, 1694 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1646 .ops = &i9xx_always_on_power_well_ops, 1695 .ops = &i9xx_always_on_power_well_ops,
1696 .data = PUNIT_POWER_WELL_ALWAYS_ON,
1647 }, 1697 },
1648 { 1698 {
1649 .name = "display", 1699 .name = "display",
@@ -1745,20 +1795,29 @@ static struct i915_power_well skl_power_wells[] = {
1745 .always_on = 1, 1795 .always_on = 1,
1746 .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS, 1796 .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1747 .ops = &i9xx_always_on_power_well_ops, 1797 .ops = &i9xx_always_on_power_well_ops,
1798 .data = SKL_DISP_PW_ALWAYS_ON,
1748 }, 1799 },
1749 { 1800 {
1750 .name = "power well 1", 1801 .name = "power well 1",
1751 .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS, 1802 /* Handled by the DMC firmware */
1803 .domains = 0,
1752 .ops = &skl_power_well_ops, 1804 .ops = &skl_power_well_ops,
1753 .data = SKL_DISP_PW_1, 1805 .data = SKL_DISP_PW_1,
1754 }, 1806 },
1755 { 1807 {
1756 .name = "MISC IO power well", 1808 .name = "MISC IO power well",
1757 .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS, 1809 /* Handled by the DMC firmware */
1810 .domains = 0,
1758 .ops = &skl_power_well_ops, 1811 .ops = &skl_power_well_ops,
1759 .data = SKL_DISP_PW_MISC_IO, 1812 .data = SKL_DISP_PW_MISC_IO,
1760 }, 1813 },
1761 { 1814 {
1815 .name = "DC off",
1816 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
1817 .ops = &gen9_dc_off_power_well_ops,
1818 .data = SKL_DISP_PW_DC_OFF,
1819 },
1820 {
1762 .name = "power well 2", 1821 .name = "power well 2",
1763 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 1822 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1764 .ops = &skl_power_well_ops, 1823 .ops = &skl_power_well_ops,
@@ -1790,6 +1849,34 @@ static struct i915_power_well skl_power_wells[] = {
1790 }, 1849 },
1791}; 1850};
1792 1851
1852void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
1853{
1854 struct i915_power_well *well;
1855
1856 if (!IS_SKYLAKE(dev_priv))
+		return;
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+	intel_power_well_enable(dev_priv, well);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+	intel_power_well_enable(dev_priv, well);
+}
+
+void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *well;
+
+	if (!IS_SKYLAKE(dev_priv))
+		return;
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+	intel_power_well_disable(dev_priv, well);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+	intel_power_well_disable(dev_priv, well);
+}
+
 static struct i915_power_well bxt_power_wells[] = {
 	{
 		.name = "always-on",
@@ -1804,11 +1891,17 @@ static struct i915_power_well bxt_power_wells[] = {
 		.data = SKL_DISP_PW_1,
 	},
 	{
+		.name = "DC off",
+		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
+		.ops = &gen9_dc_off_power_well_ops,
+		.data = SKL_DISP_PW_DC_OFF,
+	},
+	{
 		.name = "power well 2",
 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
 		.data = SKL_DISP_PW_2,
-	}
+	},
 };
 
 static int
@@ -1818,7 +1911,7 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
 	if (disable_power_well >= 0)
 		return !!disable_power_well;
 
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_BROXTON(dev_priv)) {
 		DRM_DEBUG_KMS("Disabling display power well support\n");
 		return 0;
 	}
@@ -1845,6 +1938,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
 						     i915.disable_power_well);
 
+	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+
 	mutex_init(&power_domains->lock);
 
 	/*
@@ -1855,7 +1950,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 		set_power_wells(power_domains, hsw_power_wells);
 	} else if (IS_BROADWELL(dev_priv->dev)) {
 		set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_SKYLAKE(dev_priv->dev)) {
+	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
 		set_power_wells(power_domains, skl_power_wells);
 	} else if (IS_BROXTON(dev_priv->dev)) {
 		set_power_wells(power_domains, bxt_power_wells);
@@ -1870,21 +1965,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	struct device *device = &dev->pdev->dev;
-
-	if (!HAS_RUNTIME_PM(dev))
-		return;
-
-	if (!intel_enable_rc6(dev))
-		return;
-
-	/* Make sure we're not suspended first. */
-	pm_runtime_get_sync(device);
-}
-
 /**
  * intel_power_domains_fini - finalizes the power domain structures
  * @dev_priv: i915 device instance
@@ -1895,15 +1975,17 @@ static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-	intel_runtime_pm_disable(dev_priv);
-
 	/* The i915.ko module is still not prepared to be loaded when
 	 * the power well is not enabled, so just enable it in case
 	 * we're going to unload/reload. */
 	intel_display_set_init_power(dev_priv, true);
+
+	/* Remove the refcount we took to keep power well support disabled. */
+	if (!i915.disable_power_well)
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 }
 
-static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *power_well;
@@ -1918,6 +2000,47 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
+static void skl_display_core_init(struct drm_i915_private *dev_priv,
+				  bool resume)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	uint32_t val;
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+	/* enable PCH reset handshake */
+	val = I915_READ(HSW_NDE_RSTWRN_OPT);
+	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+
+	/* enable PG1 and Misc I/O */
+	mutex_lock(&power_domains->lock);
+	skl_pw1_misc_io_init(dev_priv);
+	mutex_unlock(&power_domains->lock);
+
+	if (!resume)
+		return;
+
+	skl_init_cdclk(dev_priv);
+
+	if (dev_priv->csr.dmc_payload)
+		intel_csr_load_program(dev_priv);
+}
+
+static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+	skl_uninit_cdclk(dev_priv);
+
+	/* The spec doesn't call for removing the reset handshake flag */
+	/* disable PG1 and Misc I/O */
+	mutex_lock(&power_domains->lock);
+	skl_pw1_misc_io_fini(dev_priv);
+	mutex_unlock(&power_domains->lock);
+}
+
 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_well *cmn_bc =
@@ -2040,14 +2163,16 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	power_domains->initializing = true;
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+		skl_display_core_init(dev_priv, resume);
+	} else if (IS_CHERRYVIEW(dev)) {
 		mutex_lock(&power_domains->lock);
 		chv_phy_control_init(dev_priv);
 		mutex_unlock(&power_domains->lock);
@@ -2059,38 +2184,31 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev_priv, true);
-	intel_power_domains_resume(dev_priv);
+	/* Disable power support if the user asked so. */
+	if (!i915.disable_power_well)
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+	intel_power_domains_sync_hw(dev_priv);
 	power_domains->initializing = false;
 }
 
 /**
- * intel_aux_display_runtime_get - grab an auxiliary power domain reference
+ * intel_power_domains_suspend - suspend power domain state
  * @dev_priv: i915 device instance
 *
- * This function grabs a power domain reference for the auxiliary power domain
- * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
- * parents are powered up. Therefore users should only grab a reference to the
- * innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_aux_display_runtime_put() to release the reference again.
+ * This function prepares the hardware power domain state before entering
+ * system suspend. It must be paired with intel_power_domains_init_hw().
 */
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 {
-	intel_runtime_pm_get(dev_priv);
-}
-
-/**
- * intel_aux_display_runtime_put - release an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function drops the auxiliary power domain reference obtained by
- * intel_aux_display_runtime_get() and might power down the corresponding
- * hardware block right away if this is the last reference.
- */
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
-	intel_runtime_pm_put(dev_priv);
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_display_core_uninit(dev_priv);
+
+	/*
+	 * Even if power well support was disabled we still want to disable
+	 * power wells while we are system suspended.
+	 */
+	if (!i915.disable_power_well)
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 }
 
 /**
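
The power-domain rework above reads most easily as a refcounting exercise: when the user disables runtime power-well toggling (i915.disable_power_well=0), intel_power_domains_init_hw() now takes a long-lived reference on POWER_DOMAIN_INIT so the wells stay up, and intel_power_domains_suspend()/intel_power_domains_fini() drop that same reference again. A toy model of the discipline being maintained (illustration only; the struct, names and simplified refcount below are assumptions for the sketch, not the driver's code):

    #include <assert.h>
    #include <stdio.h>

    /* Toy power domain: just a refcount over a named well. */
    struct power_domain {
    	const char *name;
    	int refcount;
    };

    static void domain_get(struct power_domain *pd)
    {
    	pd->refcount++;	/* the real driver powers up lazily on 0 -> 1 */
    }

    static void domain_put(struct power_domain *pd)
    {
    	assert(pd->refcount > 0);	/* every put must match a get */
    	if (--pd->refcount == 0)
    		printf("%s: last reference dropped, well may power down\n",
    		       pd->name);
    }

    int main(void)
    {
    	struct power_domain init = { .name = "POWER_DOMAIN_INIT" };

    	domain_get(&init);	/* init_hw: pin the wells while running */
    	domain_put(&init);	/* suspend (or fini): release the pin */
    	return 0;
    }

The invariant is that the wells only power down once every holder has released its reference, which is why suspend must drop the pin even though power-well support was "disabled".
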
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c42b636c2087..2e1da060b0e1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -74,7 +74,7 @@ struct intel_sdvo {
 	struct i2c_adapter ddc;
 
 	/* Register for the SDVO device: SDVOB or SDVOC */
-	uint32_t sdvo_reg;
+	i915_reg_t sdvo_reg;
 
 	/* Active outputs controlled by this SDVO output */
 	uint16_t controlled_output;
@@ -120,8 +120,7 @@ struct intel_sdvo {
 	 */
 	bool is_tv;
 
-	/* On different gens SDVOB is at different places. */
-	bool is_sdvob;
+	enum port port;
 
 	/* This is for current tv format name */
 	int tv_format_index;
@@ -245,7 +244,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 	u32 bval = val, cval = val;
 	int i;
 
-	if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		I915_WRITE(intel_sdvo->sdvo_reg, val);
 		POSTING_READ(intel_sdvo->sdvo_reg);
 		/*
@@ -259,7 +258,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 		return;
 	}
 
-	if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+	if (intel_sdvo->port == PORT_B)
 		cval = I915_READ(GEN3_SDVOC);
 	else
 		bval = I915_READ(GEN3_SDVOB);
@@ -422,7 +421,7 @@ static const struct _sdvo_cmd_name {
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
-#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
 
 static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 				   const void *args, int args_len)
@@ -1282,14 +1281,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 		sdvox |= SDVO_BORDER_ENABLE;
 	} else {
 		sdvox = I915_READ(intel_sdvo->sdvo_reg);
-		switch (intel_sdvo->sdvo_reg) {
-		case GEN3_SDVOB:
+		if (intel_sdvo->port == PORT_B)
 			sdvox &= SDVOB_PRESERVE_MASK;
-			break;
-		case GEN3_SDVOC:
+		else
 			sdvox &= SDVOC_PRESERVE_MASK;
-			break;
-		}
 		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
 	}
 
@@ -1464,12 +1459,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
 	 * matching DP port to be enabled on transcoder A.
 	 */
 	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+		/*
+		 * We get CPU/PCH FIFO underruns on the other pipe when
+		 * doing the workaround. Sweep them under the rug.
+		 */
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
 		temp &= ~SDVO_PIPE_B_SELECT;
 		temp |= SDVO_ENABLE;
 		intel_sdvo_write_sdvox(intel_sdvo, temp);
 
 		temp &= ~SDVO_ENABLE;
 		intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
 }
 
@@ -2251,7 +2257,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
 {
 	struct sdvo_device_mapping *mapping;
 
-	if (sdvo->is_sdvob)
+	if (sdvo->port == PORT_B)
 		mapping = &(dev_priv->sdvo_mappings[0]);
 	else
 		mapping = &(dev_priv->sdvo_mappings[1]);
@@ -2269,7 +2275,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 	struct sdvo_device_mapping *mapping;
 	u8 pin;
 
-	if (sdvo->is_sdvob)
+	if (sdvo->port == PORT_B)
 		mapping = &dev_priv->sdvo_mappings[0];
 	else
 		mapping = &dev_priv->sdvo_mappings[1];
@@ -2307,7 +2313,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-	if (sdvo->is_sdvob) {
+	if (sdvo->port == PORT_B) {
 		my_mapping = &dev_priv->sdvo_mappings[0];
 		other_mapping = &dev_priv->sdvo_mappings[1];
 	} else {
@@ -2332,7 +2338,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 	/* No SDVO device info is found for another DVO port,
 	 * so use mapping assumption we had before BIOS parsing.
 	 */
-	if (sdvo->is_sdvob)
+	if (sdvo->port == PORT_B)
 		return 0x70;
 	else
 		return 0x72;
@@ -2939,18 +2945,31 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
 	return i2c_add_adapter(&sdvo->ddc) == 0;
 }
 
-bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
+static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
+				   enum port port)
+{
+	if (HAS_PCH_SPLIT(dev_priv))
+		WARN_ON(port != PORT_B);
+	else
+		WARN_ON(port != PORT_B && port != PORT_C);
+}
+
+bool intel_sdvo_init(struct drm_device *dev,
+		     i915_reg_t sdvo_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo *intel_sdvo;
 	int i;
+
+	assert_sdvo_port_valid(dev_priv, port);
+
 	intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
 	if (!intel_sdvo)
 		return false;
 
 	intel_sdvo->sdvo_reg = sdvo_reg;
-	intel_sdvo->is_sdvob = is_sdvob;
+	intel_sdvo->port = port;
 	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
 	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
 	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
@@ -2959,7 +2978,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 	/* encoder type will be decided later */
 	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
-	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
+			 NULL);
 
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
@@ -3000,8 +3020,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 	 * hotplug lines.
 	 */
 	if (intel_sdvo->hotplug_active) {
-		intel_encoder->hpd_pin =
-			intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
+		if (intel_sdvo->port == PORT_B)
+			intel_encoder->hpd_pin = HPD_SDVO_B;
+		else
+			intel_encoder->hpd_pin = HPD_SDVO_C;
 	}
 
 	/*
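
The intel_sdvo refactor above replaces the is_sdvob flag with an explicit enum port: the boolean only ever encoded "port B, otherwise port C", and assert_sdvo_port_valid() now documents which ports are legal per platform at the single entry point instead of scattering that knowledge across register-offset comparisons. A standalone sketch of the idea (illustrative only; the enum values here are a stand-in, not the kernel's full enum port):

    #include <stdio.h>

    /* An explicit port enum replaces a bool that secretly meant
     * "B, otherwise C"; adding another port later is a new
     * enumerator rather than a second flag. */
    enum port { PORT_A, PORT_B, PORT_C };

    static const char *sdvo_name(enum port port)
    {
    	return port == PORT_B ? "SDVOB" : "SDVOC";
    }

    int main(void)
    {
    	printf("%s\n", sdvo_name(PORT_B));	/* SDVOB */
    	printf("%s\n", sdvo_name(PORT_C));	/* SDVOC */
    	return 0;
    }
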
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 56dc132e8e20..dbf421351b5c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -192,10 +192,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
 	u32 plane_ctl, stride_div, stride;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key =
 		&to_intel_plane_state(drm_plane->state)->ckey;
-	unsigned long surf_addr;
+	u32 surf_addr;
 	u32 tile_height, plane_offset, plane_size;
 	unsigned int rotation;
 	int x_offset, y_offset;
@@ -212,10 +211,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
 	rotation = drm_plane->state->rotation;
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-	intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
-				       pixel_size, true,
-				       src_w != crtc_w || src_h != crtc_h);
-
 	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
 					       fb->pixel_format);
 
@@ -297,8 +292,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 
 	I915_WRITE(PLANE_SURF(pipe, plane), 0);
 	POSTING_READ(PLANE_SURF(pipe, plane));
-
-	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }
 
 static void
@@ -541,10 +534,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
-	intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
-				       true,
-				       src_w != crtc_w || src_h != crtc_h);
-
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -678,10 +667,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (IS_GEN6(dev))
 		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
 
-	intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
-				       pixel_size, true,
-				       src_w != crtc_w || src_h != crtc_h);
-
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -832,8 +817,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
 	if (hscale < 0) {
 		DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
-		drm_rect_debug_print(src, true);
-		drm_rect_debug_print(dst, false);
+		drm_rect_debug_print("src: ", src, true);
+		drm_rect_debug_print("dst: ", dst, false);
 
 		return hscale;
 	}
@@ -841,8 +826,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
 	if (vscale < 0) {
 		DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
-		drm_rect_debug_print(src, true);
-		drm_rect_debug_print(dst, false);
+		drm_rect_debug_print("src: ", src, true);
+		drm_rect_debug_print("dst: ", dst, false);
 
 		return vscale;
 	}
@@ -938,9 +923,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
 
 	crtc = crtc ? crtc : plane->crtc;
 
-	if (!crtc->state->active)
-		return;
-
 	if (state->visible) {
 		intel_plane->update_plane(plane, crtc, fb,
 					  state->dst.x1, state->dst.y1,
@@ -1141,7 +1123,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
 				       &intel_plane_funcs,
 				       plane_formats, num_plane_formats,
-				       DRM_PLANE_TYPE_OVERLAY);
+				       DRM_PLANE_TYPE_OVERLAY, NULL);
 	if (ret) {
 		kfree(intel_plane);
 		goto out;
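
On the drm_rect_debug_print() change above: the helper now takes a caller-supplied prefix, so the source and destination rectangles are distinguishable in a single log line each, and the final bool still selects 16.16 fixed-point formatting, which i915 uses for plane source coordinates. A toy re-implementation of the formatting, just to make the call sites readable (an assumption-level sketch, not the DRM helper itself):

    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    /* Prints WxH+X+Y, shifting coordinates down by 16 when they
     * are in 16.16 fixed point. */
    static void rect_debug_print(const char *prefix, const struct rect *r,
    			     bool fixed_point)
    {
    	int s = fixed_point ? 16 : 0;

    	printf("%s%dx%d%+d%+d\n", prefix,
    	       (r->x2 - r->x1) >> s, (r->y2 - r->y1) >> s,
    	       r->x1 >> s, r->y1 >> s);
    }

    int main(void)
    {
    	struct rect src = { 0, 0, 1920 << 16, 1080 << 16 };
    	struct rect dst = { 0, 0, 1920, 1080 };

    	rect_debug_print("src: ", &src, true);   /* fixed point */
    	rect_debug_print("dst: ", &dst, false);  /* integer pixels */
    	return 0;
    }
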
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6bea78944cd6..948cbff6c62e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1645,7 +1645,7 @@ intel_tv_init(struct drm_device *dev)
 				   DRM_MODE_CONNECTOR_SVIDEO);
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
-			 DRM_MODE_ENCODER_TVDAC);
+			 DRM_MODE_ENCODER_TVDAC, NULL);
 
 	intel_encoder->compute_config = intel_tv_compute_config;
 	intel_encoder->get_config = intel_tv_get_config;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 43cba129a0c0..c2358ba78b30 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -29,19 +29,7 @@
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 50
 
-#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
-#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
-#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
-#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
 
 static const char * const forcewake_domain_names[] = {
 	"render",
@@ -72,7 +60,7 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
 static inline void
 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
 {
-	WARN_ON(d->reg_set == 0);
+	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
 	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
 }
 
@@ -118,7 +106,7 @@ static inline void
 fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
 {
 	/* something from same cacheline, but not from the set register */
-	if (d->reg_post)
+	if (i915_mmio_reg_valid(d->reg_post))
 		__raw_posting_read(d->i915, d->reg_post);
 }
 
@@ -525,8 +513,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 }
 
 /* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(reg) \
-	((reg) < 0x40000 && (reg) != FORCEWAKE)
+#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 
 #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 
@@ -589,7 +576,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 	REG_RANGE((reg), 0x9400, 0x9800)
 
 #define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
-	((reg) < 0x40000 &&\
+	((reg) < 0x40000 && \
 	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
 	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
 	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
@@ -605,8 +592,8 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 }
 
 static void
-hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
-			bool before)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
			i915_reg_t reg, bool read, bool before)
 {
 	const char *op = read ? "reading" : "writing to";
 	const char *when = before ? "before" : "after";
@@ -616,7 +603,7 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
 
 	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
 		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
-		     when, op, reg);
+		     when, op, i915_mmio_reg_offset(reg));
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 		i915.mmio_debug--; /* Only report the first N failures */
 	}
@@ -649,7 +636,7 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
 
 #define __gen2_read(x) \
 static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN2_READ_FOOTER; \
@@ -657,7 +644,7 @@ gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 
 #define __gen5_read(x) \
 static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
 	ilk_dummy_write(dev_priv); \
 	val = __raw_i915_read##x(dev_priv, reg); \
@@ -680,6 +667,7 @@ __gen2_read(64)
 #undef GEN2_READ_HEADER
 
 #define GEN6_READ_HEADER(x) \
+	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	u##x val = 0; \
 	assert_device_not_suspended(dev_priv); \
@@ -714,20 +702,12 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
 }
 
-#define __vgpu_read(x) \
-static u##x \
-vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-	GEN6_READ_HEADER(x); \
-	val = __raw_i915_read##x(dev_priv, reg); \
-	GEN6_READ_FOOTER; \
-}
-
 #define __gen6_read(x) \
 static u##x \
-gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN6_READ_HEADER(x); \
 	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
-	if (NEEDS_FORCE_WAKE(reg)) \
+	if (NEEDS_FORCE_WAKE(offset)) \
 		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
@@ -736,47 +716,56 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 
 #define __vlv_read(x) \
 static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+	enum forcewake_domains fw_engine = 0; \
 	GEN6_READ_HEADER(x); \
-	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
-		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
-	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
-		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+	if (!NEEDS_FORCE_WAKE(offset)) \
+		fw_engine = 0; \
+	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_MEDIA; \
+	if (fw_engine) \
+		__force_wake_get(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
 
 #define __chv_read(x) \
 static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+	enum forcewake_domains fw_engine = 0; \
 	GEN6_READ_HEADER(x); \
-	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
-		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
-	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
-		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
-	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
-		__force_wake_get(dev_priv, \
-				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
+	if (!NEEDS_FORCE_WAKE(offset)) \
+		fw_engine = 0; \
+	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	if (fw_engine) \
+		__force_wake_get(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
 
 #define SKL_NEEDS_FORCE_WAKE(reg) \
 	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
 
 #define __gen9_read(x) \
 static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
 	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
-	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
+	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
 		fw_engine = 0; \
-	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
 		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
 		fw_engine = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
 		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
 	else \
 		fw_engine = FORCEWAKE_BLITTER; \
@@ -787,10 +776,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 	GEN6_READ_FOOTER; \
 }
 
-__vgpu_read(8)
-__vgpu_read(16)
-__vgpu_read(32)
-__vgpu_read(64)
 __gen9_read(8)
 __gen9_read(16)
 __gen9_read(32)
@@ -812,10 +797,37 @@ __gen6_read(64)
 #undef __chv_read
 #undef __vlv_read
 #undef __gen6_read
-#undef __vgpu_read
 #undef GEN6_READ_FOOTER
 #undef GEN6_READ_HEADER
 
+#define VGPU_READ_HEADER(x) \
+	unsigned long irqflags; \
+	u##x val = 0; \
+	assert_device_not_suspended(dev_priv); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define VGPU_READ_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val
+
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+	VGPU_READ_HEADER(x); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	VGPU_READ_FOOTER; \
+}
+
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
+
+#undef __vgpu_read
+#undef VGPU_READ_FOOTER
+#undef VGPU_READ_HEADER
+
 #define GEN2_WRITE_HEADER \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	assert_device_not_suspended(dev_priv); \
@@ -824,7 +836,7 @@ __gen6_read(64)
 
 #define __gen2_write(x) \
 static void \
-gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN2_WRITE_FOOTER; \
@@ -832,7 +844,7 @@ gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
 
 #define __gen5_write(x) \
 static void \
-gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
 	ilk_dummy_write(dev_priv); \
 	__raw_i915_write##x(dev_priv, reg, val); \
@@ -855,6 +867,7 @@ __gen2_write(64)
 #undef GEN2_WRITE_HEADER
 
 #define GEN6_WRITE_HEADER \
+	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	assert_device_not_suspended(dev_priv); \
@@ -865,10 +878,10 @@ __gen2_write(64)
 
 #define __gen6_write(x) \
 static void \
-gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	u32 __fifo_ret = 0; \
 	GEN6_WRITE_HEADER; \
-	if (NEEDS_FORCE_WAKE(reg)) { \
+	if (NEEDS_FORCE_WAKE(offset)) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
 	__raw_i915_write##x(dev_priv, reg, val); \
@@ -880,10 +893,10 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
 
 #define __hsw_write(x) \
 static void \
-hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	u32 __fifo_ret = 0; \
 	GEN6_WRITE_HEADER; \
-	if (NEEDS_FORCE_WAKE(reg)) { \
+	if (NEEDS_FORCE_WAKE(offset)) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
 	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
@@ -896,15 +909,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
 	GEN6_WRITE_FOOTER; \
 }
 
-#define __vgpu_write(x) \
-static void vgpu_write##x(struct drm_i915_private *dev_priv, \
-			  off_t reg, u##x val, bool trace) { \
-	GEN6_WRITE_HEADER; \
-	__raw_i915_write##x(dev_priv, reg, val); \
-	GEN6_WRITE_FOOTER; \
-}
-
-static const u32 gen8_shadowed_regs[] = {
+static const i915_reg_t gen8_shadowed_regs[] = {
 	FORCEWAKE_MT,
 	GEN6_RPNSWREQ,
 	GEN6_RC_VIDEO_FREQ,
@@ -915,11 +920,12 @@ static const u32 gen8_shadowed_regs[] = {
 	/* TODO: Other registers are not yet used */
 };
 
-static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
+			     i915_reg_t reg)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
-		if (reg == gen8_shadowed_regs[i])
+		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
 			return true;
 
 	return false;
@@ -927,10 +933,10 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
 
 #define __gen8_write(x) \
 static void \
-gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN6_WRITE_HEADER; \
 	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
-	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
+	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
 		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
@@ -940,22 +946,25 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
 
 #define __chv_write(x) \
 static void \
-chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+	enum forcewake_domains fw_engine = 0; \
 	GEN6_WRITE_HEADER; \
-	if (!shadowed) { \
-		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
-			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
-		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
-			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
-		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
-			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
-	} \
+	if (!NEEDS_FORCE_WAKE(offset) || \
+	    is_gen8_shadowed(dev_priv, reg)) \
+		fw_engine = 0; \
+	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	if (fw_engine) \
+		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 
-static const u32 gen9_shadowed_regs[] = {
+static const i915_reg_t gen9_shadowed_regs[] = {
 	RING_TAIL(RENDER_RING_BASE),
 	RING_TAIL(GEN6_BSD_RING_BASE),
 	RING_TAIL(VEBOX_RING_BASE),
@@ -968,11 +977,12 @@ static const u32 gen9_shadowed_regs[] = {
 	/* TODO: Other registers are not yet used */
 };
 
-static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
+			     i915_reg_t reg)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
-		if (reg == gen9_shadowed_regs[i])
+		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
 			return true;
 
 	return false;
@@ -980,19 +990,19 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
 
 #define __gen9_write(x) \
 static void \
-gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
 	      bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
 	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
-	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
+	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
 	    is_gen9_shadowed(dev_priv, reg)) \
 		fw_engine = 0; \
-	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
 		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
 		fw_engine = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
 		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
 	else \
 		fw_engine = FORCEWAKE_BLITTER; \
@@ -1024,20 +1034,41 @@ __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
 __gen6_write(64)
-__vgpu_write(8)
-__vgpu_write(16)
-__vgpu_write(32)
-__vgpu_write(64)
 
 #undef __gen9_write
 #undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
 #undef __gen6_write
-#undef __vgpu_write
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
 
+#define VGPU_WRITE_HEADER \
+	unsigned long irqflags; \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	assert_device_not_suspended(dev_priv); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define VGPU_WRITE_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
+#define __vgpu_write(x) \
+static void vgpu_write##x(struct drm_i915_private *dev_priv, \
+			  i915_reg_t reg, u##x val, bool trace) { \
+	VGPU_WRITE_HEADER; \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	VGPU_WRITE_FOOTER; \
+}
+
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
+
+#undef __vgpu_write
+#undef VGPU_WRITE_FOOTER
+#undef VGPU_WRITE_HEADER
+
 #define ASSIGN_WRITE_MMIO_VFUNCS(x) \
 do { \
 	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
@@ -1057,7 +1088,8 @@ do { \
 
 static void fw_domain_init(struct drm_i915_private *dev_priv,
 			   enum forcewake_domain_id domain_id,
-			   u32 reg_set, u32 reg_ack)
+			   i915_reg_t reg_set,
+			   i915_reg_t reg_ack)
 {
 	struct intel_uncore_forcewake_domain *d;
 
@@ -1087,8 +1119,6 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 		d->reg_post = FORCEWAKE_ACK_VLV;
 	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
 		d->reg_post = ECOBUS;
-	else
-		d->reg_post = 0;
 
 	d->i915 = dev_priv;
 	d->id = domain_id;
@@ -1262,12 +1292,14 @@ void intel_uncore_fini(struct drm_device *dev)
 #define GEN_RANGE(l, h) GENMASK(h, l)
 
 static const struct register_whitelist {
-	uint64_t offset;
+	i915_reg_t offset_ldw, offset_udw;
 	uint32_t size;
 	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
 	uint32_t gen_bitmask;
 } whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
+	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
+	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
+	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1277,11 +1309,11 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	struct drm_i915_reg_read *reg = data;
 	struct register_whitelist const *entry = whitelist;
 	unsigned size;
-	u64 offset;
+	i915_reg_t offset_ldw, offset_udw;
 	int i, ret = 0;
 
 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-		if (entry->offset == (reg->offset & -entry->size) &&
+		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
 		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
 			break;
 	}
@@ -1293,27 +1325,28 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	 * be naturally aligned (and those that are not so aligned merely
 	 * limit the available flags for that register).
 	 */
-	offset = entry->offset;
+	offset_ldw = entry->offset_ldw;
+	offset_udw = entry->offset_udw;
 	size = entry->size;
-	size |= reg->offset ^ offset;
+	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
 
 	intel_runtime_pm_get(dev_priv);
 
 	switch (size) {
 	case 8 | 1:
-		reg->val = I915_READ64_2x32(offset, offset+4);
+		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
 		break;
 	case 8:
-		reg->val = I915_READ64(offset);
+		reg->val = I915_READ64(offset_ldw);
 		break;
 	case 4:
-		reg->val = I915_READ(offset);
+		reg->val = I915_READ(offset_ldw);
 		break;
 	case 2:
-		reg->val = I915_READ16(offset);
+		reg->val = I915_READ16(offset_ldw);
 		break;
 	case 1:
-		reg->val = I915_READ8(offset);
+		reg->val = I915_READ8(offset_ldw);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1470,7 +1503,7 @@ static int gen6_do_reset(struct drm_device *dev)
 }
 
 static int wait_for_register(struct drm_i915_private *dev_priv,
-			     const u32 reg,
+			     i915_reg_t reg,
 			     const u32 mask,
 			     const u32 value,
 			     const unsigned long timeout_ms)
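
The pervasive u32/off_t-to-i915_reg_t conversion in intel_uncore.c is the heart of this commit: wrapping the MMIO offset in a one-member struct costs nothing at runtime but stops raw integers from silently converting into register arguments, which is exactly the class of bug the old off_t signatures allowed. The shape of the wrapper, reconstructed as a sketch (assumed definitions; the authoritative ones live in the i915 headers):

    #include <stdbool.h>
    #include <stdint.h>

    /* A phantom-typed register: same size as a u32, but a distinct
     * C type, so passing a bare offset where a register is expected
     * becomes a compile error. */
    typedef struct { uint32_t reg; } i915_reg_t;

    #define _MMIO(r) ((i915_reg_t){ .reg = (r) })

    static inline uint32_t i915_mmio_reg_offset(i915_reg_t r)
    {
    	return r.reg;
    }

    static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
    {
    	return a.reg == b.reg;
    }

    static inline bool i915_mmio_reg_valid(i915_reg_t r)
    {
    	return r.reg != 0;
    }

With this in place, the read/write macros extract the plain offset once per access (the new `offset` local in GEN6_READ_HEADER/GEN6_WRITE_HEADER) so that range checks such as NEEDS_FORCE_WAKE() keep operating on an integer, while comparisons against register tables go through i915_mmio_reg_equal() and validity tests through i915_mmio_reg_valid().
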
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 2b81a417cf29..35ca4f007839 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -10,15 +10,6 @@ config DRM_IMX
 	help
 	  enable i.MX graphics support
 
-config DRM_IMX_FB_HELPER
-	tristate "provide legacy framebuffer /dev/fb0"
-	select DRM_KMS_CMA_HELPER
-	depends on DRM_IMX
-	help
-	  The DRM framework can provide a legacy /dev/fb0 framebuffer
-	  for your device. This is necessary to get a framebuffer console
-	  and also for applications using the legacy framebuffer API
-
 config DRM_IMX_PARALLEL_DISPLAY
 	tristate "Support for parallel displays"
 	select DRM_PANEL
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 98605ea2ad9d..35fcf6b84537 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -251,7 +251,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
 
 	drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
 	drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);
 
 	return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
 }
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 64f16ea779ef..09e20ea69419 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -49,8 +49,10 @@ struct imx_drm_crtc {
49 struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs; 49 struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs;
50}; 50};
51 51
52#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
52static int legacyfb_depth = 16; 53static int legacyfb_depth = 16;
53module_param(legacyfb_depth, int, 0444); 54module_param(legacyfb_depth, int, 0444);
55#endif
54 56
55int imx_drm_crtc_id(struct imx_drm_crtc *crtc) 57int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
56{ 58{
@@ -60,26 +62,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
60 62
61static void imx_drm_driver_lastclose(struct drm_device *drm) 63static void imx_drm_driver_lastclose(struct drm_device *drm)
62{ 64{
63#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
64 struct imx_drm_device *imxdrm = drm->dev_private; 65 struct imx_drm_device *imxdrm = drm->dev_private;
65 66
66 if (imxdrm->fbhelper) 67 drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
67 drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
68#endif
69} 68}
70 69
71static int imx_drm_driver_unload(struct drm_device *drm) 70static int imx_drm_driver_unload(struct drm_device *drm)
72{ 71{
73#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
74 struct imx_drm_device *imxdrm = drm->dev_private; 72 struct imx_drm_device *imxdrm = drm->dev_private;
75#endif
76 73
77 drm_kms_helper_poll_fini(drm); 74 drm_kms_helper_poll_fini(drm);
78 75
79#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
80 if (imxdrm->fbhelper) 76 if (imxdrm->fbhelper)
81 drm_fbdev_cma_fini(imxdrm->fbhelper); 77 drm_fbdev_cma_fini(imxdrm->fbhelper);
82#endif
83 78
84 component_unbind_all(drm->dev, drm); 79 component_unbind_all(drm->dev, drm);
85 80
@@ -215,11 +210,9 @@ EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
215 210
216static void imx_drm_output_poll_changed(struct drm_device *drm) 211static void imx_drm_output_poll_changed(struct drm_device *drm)
217{ 212{
218#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
219 struct imx_drm_device *imxdrm = drm->dev_private; 213 struct imx_drm_device *imxdrm = drm->dev_private;
220 214
221 drm_fbdev_cma_hotplug_event(imxdrm->fbhelper); 215 drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
222#endif
223} 216}
224 217
225static struct drm_mode_config_funcs imx_drm_mode_config_funcs = { 218static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
@@ -308,7 +301,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
308 * The fb helper takes copies of key hardware information, so the 301 * The fb helper takes copies of key hardware information, so the
309 * crtcs/connectors/encoders must not change after this point. 302 * crtcs/connectors/encoders must not change after this point.
310 */ 303 */
311#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) 304#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
312 if (legacyfb_depth != 16 && legacyfb_depth != 32) { 305 if (legacyfb_depth != 16 && legacyfb_depth != 32) {
313 dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n"); 306 dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
314 legacyfb_depth = 16; 307 legacyfb_depth = 16;
@@ -340,7 +333,7 @@ err_kms:
340 * imx_drm_add_crtc - add a new crtc 333 * imx_drm_add_crtc - add a new crtc
341 */ 334 */
342int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, 335int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
343 struct imx_drm_crtc **new_crtc, 336 struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
344 const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, 337 const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
345 struct device_node *port) 338 struct device_node *port)
346{ 339{
@@ -379,8 +372,8 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
379 drm_crtc_helper_add(crtc, 372 drm_crtc_helper_add(crtc,
380 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 373 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
381 374
382 drm_crtc_init(drm, crtc, 375 drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
383 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); 376 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
384 377
385 return 0; 378 return 0;
386 379
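The imx-drm core change above means each CRTC provider must now create its primary plane before registering the CRTC, since imx_drm_add_crtc() forwards the plane to drm_crtc_init_with_planes(). A minimal caller-side sketch of the new order, using names from the ipuv3-crtc hunks later in this patch (error handling trimmed; not patch text):

	/* the primary plane must exist before the crtc is registered */
	ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
					    DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(ipu_crtc->plane[0]))
		return PTR_ERR(ipu_crtc->plane[0]);

	/* ...then hand it to the core, which passes it on to
	 * drm_crtc_init_with_planes() */
	return imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
				&ipu_crtc->plane[0]->base,
				&ipu_crtc_helper_funcs,
				ipu_crtc->dev->of_node);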
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 28e776d8d9d2..83284b4d4be1 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -9,6 +9,7 @@ struct drm_display_mode;
9struct drm_encoder; 9struct drm_encoder;
10struct drm_fbdev_cma; 10struct drm_fbdev_cma;
11struct drm_framebuffer; 11struct drm_framebuffer;
12struct drm_plane;
12struct imx_drm_crtc; 13struct imx_drm_crtc;
13struct platform_device; 14struct platform_device;
14 15
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
24}; 25};
25 26
26int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, 27int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
27 struct imx_drm_crtc **new_crtc, 28 struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
28 const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, 29 const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
29 struct device_node *port); 30 struct device_node *port);
30int imx_drm_remove_crtc(struct imx_drm_crtc *); 31int imx_drm_remove_crtc(struct imx_drm_crtc *);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index abacc8f67469..c79a61b67ded 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -422,7 +422,7 @@ static int imx_ldb_register(struct drm_device *drm,
422 drm_encoder_helper_add(&imx_ldb_ch->encoder, 422 drm_encoder_helper_add(&imx_ldb_ch->encoder,
423 &imx_ldb_encoder_helper_funcs); 423 &imx_ldb_encoder_helper_funcs);
424 drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs, 424 drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
425 DRM_MODE_ENCODER_LVDS); 425 DRM_MODE_ENCODER_LVDS, NULL);
426 426
427 drm_connector_helper_add(&imx_ldb_ch->connector, 427 drm_connector_helper_add(&imx_ldb_ch->connector,
428 &imx_ldb_connector_helper_funcs); 428 &imx_ldb_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e671ad369416..e61a8fca77cd 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -508,7 +508,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
508 508
509 drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); 509 drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
510 drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, 510 drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
511 encoder_type); 511 encoder_type, NULL);
512 512
513 drm_connector_helper_add(&tve->connector, 513 drm_connector_helper_add(&tve->connector,
514 &imx_tve_connector_helper_funcs); 514 &imx_tve_connector_helper_funcs);
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
721 { .compatible = "fsl,imx53-tve", }, 721 { .compatible = "fsl,imx53-tve", },
722 { /* sentinel */ } 722 { /* sentinel */ }
723}; 723};
724MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
724 725
725static struct platform_driver imx_tve_driver = { 726static struct platform_driver imx_tve_driver = {
726 .probe = imx_tve_probe, 727 .probe = imx_tve_probe,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7bc8301fafff..4ab841eebee1 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
212 212
213 spin_lock_irqsave(&drm->event_lock, flags); 213 spin_lock_irqsave(&drm->event_lock, flags);
214 if (ipu_crtc->page_flip_event) 214 if (ipu_crtc->page_flip_event)
215 drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); 215 drm_crtc_send_vblank_event(&ipu_crtc->base,
216 ipu_crtc->page_flip_event);
216 ipu_crtc->page_flip_event = NULL; 217 ipu_crtc->page_flip_event = NULL;
217 imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); 218 imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
218 spin_unlock_irqrestore(&drm->event_lock, flags); 219 spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
349 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); 350 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
350 int dp = -EINVAL; 351 int dp = -EINVAL;
351 int ret; 352 int ret;
352 int id;
353 353
354 ret = ipu_get_resources(ipu_crtc, pdata); 354 ret = ipu_get_resources(ipu_crtc, pdata);
355 if (ret) { 355 if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
358 return ret; 358 return ret;
359 } 359 }
360 360
361 if (pdata->dp >= 0)
362 dp = IPU_DP_FLOW_SYNC_BG;
363 ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
364 DRM_PLANE_TYPE_PRIMARY);
365 if (IS_ERR(ipu_crtc->plane[0])) {
366 ret = PTR_ERR(ipu_crtc->plane[0]);
367 goto err_put_resources;
368 }
369
361 ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, 370 ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
362 &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node); 371 &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
372 ipu_crtc->dev->of_node);
363 if (ret) { 373 if (ret) {
364 dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); 374 dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
365 goto err_put_resources; 375 goto err_put_resources;
366 } 376 }
367 377
368 if (pdata->dp >= 0)
369 dp = IPU_DP_FLOW_SYNC_BG;
370 id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
371 ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
372 pdata->dma[0], dp, BIT(id), true);
373 ret = ipu_plane_get_resources(ipu_crtc->plane[0]); 378 ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
374 if (ret) { 379 if (ret) {
375 dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", 380 dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
379 384
380 /* If this crtc is using the DP, add an overlay plane */ 385 /* If this crtc is using the DP, add an overlay plane */
381 if (pdata->dp >= 0 && pdata->dma[1] > 0) { 386 if (pdata->dp >= 0 && pdata->dma[1] > 0) {
382 ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu, 387 ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
383 pdata->dma[1], 388 IPU_DP_FLOW_SYNC_FG,
384 IPU_DP_FLOW_SYNC_FG, 389 drm_crtc_mask(&ipu_crtc->base),
385 BIT(id), false); 390 DRM_PLANE_TYPE_OVERLAY);
386 if (IS_ERR(ipu_crtc->plane[1])) 391 if (IS_ERR(ipu_crtc->plane[1]))
387 ipu_crtc->plane[1] = NULL; 392 ipu_crtc->plane[1] = NULL;
388 } 393 }
@@ -407,28 +412,6 @@ err_put_resources:
407 return ret; 412 return ret;
408} 413}
409 414
410static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
411 int port_id)
412{
413 struct device_node *port;
414 int id, ret;
415
416 port = of_get_child_by_name(parent, "port");
417 while (port) {
418 ret = of_property_read_u32(port, "reg", &id);
419 if (!ret && id == port_id)
420 return port;
421
422 do {
423 port = of_get_next_child(parent, port);
424 if (!port)
425 return NULL;
426 } while (of_node_cmp(port->name, "port"));
427 }
428
429 return NULL;
430}
431
432static int ipu_drm_bind(struct device *dev, struct device *master, void *data) 415static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
433{ 416{
434 struct ipu_client_platformdata *pdata = dev->platform_data; 417 struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
470static int ipu_drm_probe(struct platform_device *pdev) 453static int ipu_drm_probe(struct platform_device *pdev)
471{ 454{
472 struct device *dev = &pdev->dev; 455 struct device *dev = &pdev->dev;
473 struct ipu_client_platformdata *pdata = dev->platform_data;
474 int ret; 456 int ret;
475 457
476 if (!dev->platform_data) 458 if (!dev->platform_data)
477 return -EINVAL; 459 return -EINVAL;
478 460
479 if (!dev->of_node) {
480 /* Associate crtc device with the corresponding DI port node */
481 dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
482 pdata->di + 2);
483 if (!dev->of_node) {
484 dev_err(dev, "missing port@%d node in %s\n",
485 pdata->di + 2, dev->parent->of_node->full_name);
486 return -ENODEV;
487 }
488 }
489
490 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 461 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
491 if (ret) 462 if (ret)
492 return ret; 463 return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 575f4c84388f..591ba2f1ae03 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
381 381
382struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, 382struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
383 int dma, int dp, unsigned int possible_crtcs, 383 int dma, int dp, unsigned int possible_crtcs,
384 bool priv) 384 enum drm_plane_type type)
385{ 385{
386 struct ipu_plane *ipu_plane; 386 struct ipu_plane *ipu_plane;
387 int ret; 387 int ret;
@@ -399,10 +399,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
399 ipu_plane->dma = dma; 399 ipu_plane->dma = dma;
400 ipu_plane->dp_flow = dp; 400 ipu_plane->dp_flow = dp;
401 401
402 ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs, 402 ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
403 &ipu_plane_funcs, ipu_plane_formats, 403 &ipu_plane_funcs, ipu_plane_formats,
404 ARRAY_SIZE(ipu_plane_formats), 404 ARRAY_SIZE(ipu_plane_formats), type,
405 priv); 405 NULL);
406 if (ret) { 406 if (ret) {
407 DRM_ERROR("failed to initialize plane\n"); 407 DRM_ERROR("failed to initialize plane\n");
408 kfree(ipu_plane); 408 kfree(ipu_plane);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 9b5eff18f5b8..3a443b413c60 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -34,7 +34,7 @@ struct ipu_plane {
34 34
35struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, 35struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
36 int dma, int dp, unsigned int possible_crtcs, 36 int dma, int dp, unsigned int possible_crtcs,
37 bool priv); 37 enum drm_plane_type type);
38 38
39/* Init IDMAC, DMFC, DP */ 39/* Init IDMAC, DMFC, DP */
40int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, 40int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index b4deb9cf9d71..fcbe4d2eeabf 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
54 54
55 if (imxpd->panel && imxpd->panel->funcs && 55 if (imxpd->panel && imxpd->panel->funcs &&
56 imxpd->panel->funcs->get_modes) { 56 imxpd->panel->funcs->get_modes) {
57 struct drm_display_info *di = &connector->display_info;
58
57 num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); 59 num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
60 if (!imxpd->bus_format && di->num_bus_formats)
61 imxpd->bus_format = di->bus_formats[0];
58 if (num_modes > 0) 62 if (num_modes > 0)
59 return num_modes; 63 return num_modes;
60 } 64 }
@@ -188,7 +192,7 @@ static int imx_pd_register(struct drm_device *drm,
188 192
189 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); 193 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
190 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs, 194 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
191 DRM_MODE_ENCODER_NONE); 195 DRM_MODE_ENCODER_NONE, NULL);
192 196
193 drm_connector_helper_add(&imxpd->connector, 197 drm_connector_helper_add(&imxpd->connector,
194 &imx_pd_connector_helper_funcs); 198 &imx_pd_connector_helper_funcs);
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 4f2068fe5d88..a7bf6a90eae5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
70 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); 70 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
71 BUG_ON(pixels_current == pixels_prev); 71 BUG_ON(pixels_current == pixels_prev);
72 72
73 if (!handle || !file_priv) {
74 mga_hide_cursor(mdev);
75 return 0;
76 }
77
73 obj = drm_gem_object_lookup(dev, file_priv, handle); 78 obj = drm_gem_object_lookup(dev, file_priv, handle);
74 if (!obj) 79 if (!obj)
75 return -ENOENT; 80 return -ENOENT;
@@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
88 goto out_unreserve1; 93 goto out_unreserve1;
89 } 94 }
90 95
91 if (!handle) {
92 mga_hide_cursor(mdev);
93 ret = 0;
94 goto out1;
95 }
96
97 /* Move cursor buffers into VRAM if they aren't already */ 96 /* Move cursor buffers into VRAM if they aren't already */
98 if (!pixels_1->pin_count) { 97 if (!pixels_1->pin_count) {
99 ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM, 98 ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 912151c36d59..205b2801d3b8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -252,7 +252,7 @@ void mgag200_fbdev_fini(struct mga_device *mdev);
252 /* mgag200_main.c */ 252 /* mgag200_main.c */
253int mgag200_framebuffer_init(struct drm_device *dev, 253int mgag200_framebuffer_init(struct drm_device *dev,
254 struct mga_framebuffer *mfb, 254 struct mga_framebuffer *mfb,
255 struct drm_mode_fb_cmd2 *mode_cmd, 255 const struct drm_mode_fb_cmd2 *mode_cmd,
256 struct drm_gem_object *obj); 256 struct drm_gem_object *obj);
257 257
258 258
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index b35b5b2db4ec..d9b04b008feb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -138,7 +138,7 @@ static struct fb_ops mgag200fb_ops = {
138}; 138};
139 139
140static int mgag200fb_create_object(struct mga_fbdev *afbdev, 140static int mgag200fb_create_object(struct mga_fbdev *afbdev,
141 struct drm_mode_fb_cmd2 *mode_cmd, 141 const struct drm_mode_fb_cmd2 *mode_cmd,
142 struct drm_gem_object **gobj_p) 142 struct drm_gem_object **gobj_p)
143{ 143{
144 struct drm_device *dev = afbdev->helper.dev; 144 struct drm_device *dev = afbdev->helper.dev;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index b1a0f5656175..9147444d5bf2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -29,7 +29,7 @@ static const struct drm_framebuffer_funcs mga_fb_funcs = {
29 29
30int mgag200_framebuffer_init(struct drm_device *dev, 30int mgag200_framebuffer_init(struct drm_device *dev,
31 struct mga_framebuffer *gfb, 31 struct mga_framebuffer *gfb,
32 struct drm_mode_fb_cmd2 *mode_cmd, 32 const struct drm_mode_fb_cmd2 *mode_cmd,
33 struct drm_gem_object *obj) 33 struct drm_gem_object *obj)
34{ 34{
35 int ret; 35 int ret;
@@ -47,7 +47,7 @@ int mgag200_framebuffer_init(struct drm_device *dev,
47static struct drm_framebuffer * 47static struct drm_framebuffer *
48mgag200_user_framebuffer_create(struct drm_device *dev, 48mgag200_user_framebuffer_create(struct drm_device *dev,
49 struct drm_file *filp, 49 struct drm_file *filp,
50 struct drm_mode_fb_cmd2 *mode_cmd) 50 const struct drm_mode_fb_cmd2 *mode_cmd)
51{ 51{
52 struct drm_gem_object *obj; 52 struct drm_gem_object *obj;
53 struct mga_framebuffer *mga_fb; 53 struct mga_framebuffer *mga_fb;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index c99d3fe12881..31802128dfbb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1538,7 +1538,7 @@ static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
1538 encoder->possible_crtcs = 0x1; 1538 encoder->possible_crtcs = 0x1;
1539 1539
1540 drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs, 1540 drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
1541 DRM_MODE_ENCODER_DAC); 1541 DRM_MODE_ENCODER_DAC, NULL);
1542 drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs); 1542 drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
1543 1543
1544 return encoder; 1544 return encoder;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 84d3ec98e6b9..215495c2780c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -54,3 +54,11 @@ config DRM_MSM_DSI_20NM_PHY
54 default y 54 default y
55 help 55 help
56 Choose this option if the 20nm DSI PHY is used on the platform. 56 Choose this option if the 20nm DSI PHY is used on the platform.
57
58config DRM_MSM_DSI_28NM_8960_PHY
59 bool "Enable DSI 28nm 8960 PHY driver in MSM DRM"
60 depends on DRM_MSM_DSI
61 default y
62 help
63 Choose this option if the 28nm DSI PHY 8960 variant is used on the
64 platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1c90290be716..065ad4138799 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -54,6 +54,7 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o 54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
55 55
56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ 56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
57 mdp/mdp4/mdp4_dsi_encoder.o \
57 dsi/dsi_cfg.o \ 58 dsi/dsi_cfg.o \
58 dsi/dsi_host.o \ 59 dsi/dsi_host.o \
59 dsi/dsi_manager.o \ 60 dsi/dsi_manager.o \
@@ -62,10 +63,12 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
62 63
63msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o 64msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
64msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o 65msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
66msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
65 67
66ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) 68ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
67msm-y += dsi/pll/dsi_pll.o 69msm-y += dsi/pll/dsi_pll.o
68msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o 70msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
71msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
69endif 72endif
70 73
71obj-$(CONFIG_DRM_MSM) += msm.o 74obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 1ea2df524fac..950d27d26b30 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,10 +19,6 @@
19 19
20#include "adreno_gpu.h" 20#include "adreno_gpu.h"
21 21
22#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
23# include <mach/kgsl.h>
24#endif
25
26#define ANY_ID 0xff 22#define ANY_ID 0xff
27 23
28bool hang_debug = false; 24bool hang_debug = false;
@@ -168,7 +164,6 @@ static void set_gpu_pdev(struct drm_device *dev,
168static int adreno_bind(struct device *dev, struct device *master, void *data) 164static int adreno_bind(struct device *dev, struct device *master, void *data)
169{ 165{
170 static struct adreno_platform_config config = {}; 166 static struct adreno_platform_config config = {};
171#ifdef CONFIG_OF
172 struct device_node *child, *node = dev->of_node; 167 struct device_node *child, *node = dev->of_node;
173 u32 val; 168 u32 val;
174 int ret; 169 int ret;
@@ -205,53 +200,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
205 return -ENXIO; 200 return -ENXIO;
206 } 201 }
207 202
208#else
209 struct kgsl_device_platform_data *pdata = dev->platform_data;
210 uint32_t version = socinfo_get_version();
211 if (cpu_is_apq8064ab()) {
212 config.fast_rate = 450000000;
213 config.slow_rate = 27000000;
214 config.bus_freq = 4;
215 config.rev = ADRENO_REV(3, 2, 1, 0);
216 } else if (cpu_is_apq8064()) {
217 config.fast_rate = 400000000;
218 config.slow_rate = 27000000;
219 config.bus_freq = 4;
220
221 if (SOCINFO_VERSION_MAJOR(version) == 2)
222 config.rev = ADRENO_REV(3, 2, 0, 2);
223 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
224 (SOCINFO_VERSION_MINOR(version) == 1))
225 config.rev = ADRENO_REV(3, 2, 0, 1);
226 else
227 config.rev = ADRENO_REV(3, 2, 0, 0);
228
229 } else if (cpu_is_msm8960ab()) {
230 config.fast_rate = 400000000;
231 config.slow_rate = 320000000;
232 config.bus_freq = 4;
233
234 if (SOCINFO_VERSION_MINOR(version) == 0)
235 config.rev = ADRENO_REV(3, 2, 1, 0);
236 else
237 config.rev = ADRENO_REV(3, 2, 1, 1);
238
239 } else if (cpu_is_msm8930()) {
240 config.fast_rate = 400000000;
241 config.slow_rate = 27000000;
242 config.bus_freq = 3;
243
244 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
245 (SOCINFO_VERSION_MINOR(version) == 2))
246 config.rev = ADRENO_REV(3, 0, 5, 2);
247 else
248 config.rev = ADRENO_REV(3, 0, 5, 0);
249
250 }
251# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
252 config.bus_scale_table = pdata->bus_scale_table;
253# endif
254#endif
255 dev->platform_data = &config; 203 dev->platform_data = &config;
256 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); 204 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
257 return 0; 205 return 0;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..749fbb28ec3d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -31,10 +31,12 @@ enum msm_dsi_phy_type {
31 MSM_DSI_PHY_28NM_HPM, 31 MSM_DSI_PHY_28NM_HPM,
32 MSM_DSI_PHY_28NM_LP, 32 MSM_DSI_PHY_28NM_LP,
33 MSM_DSI_PHY_20NM, 33 MSM_DSI_PHY_20NM,
34 MSM_DSI_PHY_28NM_8960,
34 MSM_DSI_PHY_MAX 35 MSM_DSI_PHY_MAX
35}; 36};
36 37
37#define DSI_DEV_REGULATOR_MAX 8 38#define DSI_DEV_REGULATOR_MAX 8
39#define DSI_BUS_CLK_MAX 4
38 40
39/* Regulators for DSI devices */ 41/* Regulators for DSI devices */
40struct dsi_reg_entry { 42struct dsi_reg_entry {
@@ -89,7 +91,7 @@ int msm_dsi_manager_phy_enable(int id,
89 u32 *clk_pre, u32 *clk_post); 91 u32 *clk_pre, u32 *clk_post);
90void msm_dsi_manager_phy_disable(int id); 92void msm_dsi_manager_phy_disable(int id);
91int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); 93int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
92bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len); 94bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
93int msm_dsi_manager_register(struct msm_dsi *msm_dsi); 95int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
94void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); 96void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
95 97
@@ -143,7 +145,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
143int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, 145int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
144 const struct mipi_dsi_msg *msg); 146 const struct mipi_dsi_msg *msg);
145void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, 147void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
146 u32 iova, u32 len); 148 u32 dma_base, u32 len);
147int msm_dsi_host_enable(struct mipi_dsi_host *host); 149int msm_dsi_host_enable(struct mipi_dsi_host *host);
148int msm_dsi_host_disable(struct mipi_dsi_host *host); 150int msm_dsi_host_disable(struct mipi_dsi_host *host);
149int msm_dsi_host_power_on(struct mipi_dsi_host *host); 151int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 5872d5e5934f..2a827d8093a2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -13,9 +13,26 @@
13 13
14#include "dsi_cfg.h" 14#include "dsi_cfg.h"
15 15
16/* DSI v2 has not been supported by now */ 16static const char * const dsi_v2_bus_clk_names[] = {
17static const struct msm_dsi_config dsi_v2_cfg = { 17 "core_mmss_clk", "iface_clk", "bus_clk",
18};
19
20static const struct msm_dsi_config apq8064_dsi_cfg = {
18 .io_offset = 0, 21 .io_offset = 0,
22 .reg_cfg = {
23 .num = 3,
24 .regs = {
25 {"vdda", 1200000, 1200000, 100000, 100},
26 {"avdd", 3000000, 3000000, 110000, 100},
27 {"vddio", 1800000, 1800000, 100000, 100},
28 },
29 },
30 .bus_clk_names = dsi_v2_bus_clk_names,
31 .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
32};
33
34static const char * const dsi_6g_bus_clk_names[] = {
35 "mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
19}; 36};
20 37
21static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { 38static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
@@ -29,6 +46,12 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
29 {"vddio", 1800000, 1800000, 100000, 100}, 46 {"vddio", 1800000, 1800000, 100000, 100},
30 }, 47 },
31 }, 48 },
49 .bus_clk_names = dsi_6g_bus_clk_names,
50 .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
51};
52
53static const char * const dsi_8916_bus_clk_names[] = {
54 "mdp_core_clk", "iface_clk", "bus_clk",
32}; 55};
33 56
34static const struct msm_dsi_config msm8916_dsi_cfg = { 57static const struct msm_dsi_config msm8916_dsi_cfg = {
@@ -42,6 +65,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
42 {"vddio", 1800000, 1800000, 100000, 100}, 65 {"vddio", 1800000, 1800000, 100000, 100},
43 }, 66 },
44 }, 67 },
68 .bus_clk_names = dsi_8916_bus_clk_names,
69 .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
45}; 70};
46 71
47static const struct msm_dsi_config msm8994_dsi_cfg = { 72static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -57,11 +82,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
57 {"lab_reg", -1, -1, -1, -1}, 82 {"lab_reg", -1, -1, -1, -1},
58 {"ibb_reg", -1, -1, -1, -1}, 83 {"ibb_reg", -1, -1, -1, -1},
59 }, 84 },
60 } 85 },
86 .bus_clk_names = dsi_6g_bus_clk_names,
87 .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
61}; 88};
62 89
63static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { 90static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
64 {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg}, 91 {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
65 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, 92 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
66 &msm8974_apq8084_dsi_cfg}, 93 &msm8974_apq8084_dsi_cfg},
67 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, 94 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 4cf887240177..a68c836744a3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,11 +25,15 @@
25#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 25#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
26#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 26#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
27 27
28#define MSM_DSI_V2_VER_MINOR_8064 0x0
29
28#define DSI_6G_REG_SHIFT 4 30#define DSI_6G_REG_SHIFT 4
29 31
30struct msm_dsi_config { 32struct msm_dsi_config {
31 u32 io_offset; 33 u32 io_offset;
32 struct dsi_reg_config reg_cfg; 34 struct dsi_reg_config reg_cfg;
35 const char * const *bus_clk_names;
36 const int num_bus_clks;
33}; 37};
34 38
35struct msm_dsi_cfg_handler { 39struct msm_dsi_cfg_handler {
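Together, the bus_clk_names/num_bus_clks fields added here and the per-SoC name tables in dsi_cfg.c let the host driver fetch and enable its bus clocks generically instead of through one hard-coded devm_clk_get() per clock name. The dsi_clk_init() and dsi_bus_clk_enable() hunks below do exactly that; a condensed sketch (not patch text):

	for (i = 0; i < cfg->num_bus_clks; i++) {
		msm_host->bus_clks[i] = devm_clk_get(dev,
						     cfg->bus_clk_names[i]);
		if (IS_ERR(msm_host->bus_clks[i]))
			return PTR_ERR(msm_host->bus_clks[i]);
	}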
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c49868efcda..48f9967b4a1b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -24,26 +24,36 @@
24#include <linux/of_graph.h> 24#include <linux/of_graph.h>
25#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/mfd/syscon.h>
28#include <linux/regmap.h>
27#include <video/mipi_display.h> 29#include <video/mipi_display.h>
28 30
29#include "dsi.h" 31#include "dsi.h"
30#include "dsi.xml.h" 32#include "dsi.xml.h"
33#include "sfpb.xml.h"
31#include "dsi_cfg.h" 34#include "dsi_cfg.h"
32 35
33static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) 36static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
34{ 37{
35 u32 ver; 38 u32 ver;
36 u32 ver_6g;
37 39
38 if (!major || !minor) 40 if (!major || !minor)
39 return -EINVAL; 41 return -EINVAL;
40 42
41 /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0 43 /*
44 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
42 * makes all other registers 4-byte shifted down. 45 * makes all other registers 4-byte shifted down.
46 *
 47 * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
 48 * older, we read the DSI_VERSION register without any shift (offset
 49 * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
50 * the case of DSI6G, this has to be zero (the offset points to a
51 * scratch register which we never touch)
43 */ 52 */
44 ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION); 53
45 if (ver_6g == 0) { 54 ver = msm_readl(base + REG_DSI_VERSION);
46 ver = msm_readl(base + REG_DSI_VERSION); 55 if (ver) {
56 /* older dsi host, there is no register shift */
47 ver = FIELD(ver, DSI_VERSION_MAJOR); 57 ver = FIELD(ver, DSI_VERSION_MAJOR);
48 if (ver <= MSM_DSI_VER_MAJOR_V2) { 58 if (ver <= MSM_DSI_VER_MAJOR_V2) {
49 /* old versions */ 59 /* old versions */
@@ -54,12 +64,17 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
54 return -EINVAL; 64 return -EINVAL;
55 } 65 }
56 } else { 66 } else {
67 /*
 68 * newer host: offset 0 has 6G_HW_VERSION and the rest of the
 69 * registers are shifted down, so read DSI_VERSION again with
70 * the shifted offset
71 */
57 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); 72 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
58 ver = FIELD(ver, DSI_VERSION_MAJOR); 73 ver = FIELD(ver, DSI_VERSION_MAJOR);
59 if (ver == MSM_DSI_VER_MAJOR_6G) { 74 if (ver == MSM_DSI_VER_MAJOR_6G) {
60 /* 6G version */ 75 /* 6G version */
61 *major = ver; 76 *major = ver;
62 *minor = ver_6g; 77 *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
63 return 0; 78 return 0;
64 } else { 79 } else {
65 return -EINVAL; 80 return -EINVAL;
@@ -91,10 +106,9 @@ struct msm_dsi_host {
91 106
92 void __iomem *ctrl_base; 107 void __iomem *ctrl_base;
93 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; 108 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
94 struct clk *mdp_core_clk; 109
95 struct clk *ahb_clk; 110 struct clk *bus_clks[DSI_BUS_CLK_MAX];
96 struct clk *axi_clk; 111
97 struct clk *mmss_misc_ahb_clk;
98 struct clk *byte_clk; 112 struct clk *byte_clk;
99 struct clk *esc_clk; 113 struct clk *esc_clk;
100 struct clk *pixel_clk; 114 struct clk *pixel_clk;
@@ -102,6 +116,14 @@ struct msm_dsi_host {
102 struct clk *pixel_clk_src; 116 struct clk *pixel_clk_src;
103 117
104 u32 byte_clk_rate; 118 u32 byte_clk_rate;
119 u32 esc_clk_rate;
120
121 /* DSI v2 specific clocks */
122 struct clk *src_clk;
123 struct clk *esc_clk_src;
124 struct clk *dsi_clk_src;
125
126 u32 src_clk_rate;
105 127
106 struct gpio_desc *disp_en_gpio; 128 struct gpio_desc *disp_en_gpio;
107 struct gpio_desc *te_gpio; 129 struct gpio_desc *te_gpio;
@@ -119,9 +141,19 @@ struct msm_dsi_host {
119 struct work_struct err_work; 141 struct work_struct err_work;
120 struct workqueue_struct *workqueue; 142 struct workqueue_struct *workqueue;
121 143
 144 /* DSI 6G TX buffer */
122 struct drm_gem_object *tx_gem_obj; 145 struct drm_gem_object *tx_gem_obj;
146
147 /* DSI v2 TX buffer */
148 void *tx_buf;
149 dma_addr_t tx_buf_paddr;
150
151 int tx_size;
152
123 u8 *rx_buf; 153 u8 *rx_buf;
124 154
155 struct regmap *sfpb;
156
125 struct drm_display_mode *mode; 157 struct drm_display_mode *mode;
126 158
127 /* connected device info */ 159 /* connected device info */
@@ -165,21 +197,31 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
165 struct msm_dsi_host *msm_host) 197 struct msm_dsi_host *msm_host)
166{ 198{
167 const struct msm_dsi_cfg_handler *cfg_hnd = NULL; 199 const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
200 struct device *dev = &msm_host->pdev->dev;
168 struct regulator *gdsc_reg; 201 struct regulator *gdsc_reg;
202 struct clk *ahb_clk;
169 int ret; 203 int ret;
170 u32 major = 0, minor = 0; 204 u32 major = 0, minor = 0;
171 205
172 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc"); 206 gdsc_reg = regulator_get(dev, "gdsc");
173 if (IS_ERR(gdsc_reg)) { 207 if (IS_ERR(gdsc_reg)) {
174 pr_err("%s: cannot get gdsc\n", __func__); 208 pr_err("%s: cannot get gdsc\n", __func__);
175 goto exit; 209 goto exit;
176 } 210 }
211
212 ahb_clk = clk_get(dev, "iface_clk");
213 if (IS_ERR(ahb_clk)) {
214 pr_err("%s: cannot get interface clock\n", __func__);
215 goto put_gdsc;
216 }
217
177 ret = regulator_enable(gdsc_reg); 218 ret = regulator_enable(gdsc_reg);
178 if (ret) { 219 if (ret) {
179 pr_err("%s: unable to enable gdsc\n", __func__); 220 pr_err("%s: unable to enable gdsc\n", __func__);
180 goto put_gdsc; 221 goto put_clk;
181 } 222 }
182 ret = clk_prepare_enable(msm_host->ahb_clk); 223
224 ret = clk_prepare_enable(ahb_clk);
183 if (ret) { 225 if (ret) {
184 pr_err("%s: unable to enable ahb_clk\n", __func__); 226 pr_err("%s: unable to enable ahb_clk\n", __func__);
185 goto disable_gdsc; 227 goto disable_gdsc;
@@ -196,9 +238,11 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
196 DBG("%s: Version %x:%x\n", __func__, major, minor); 238 DBG("%s: Version %x:%x\n", __func__, major, minor);
197 239
198disable_clks: 240disable_clks:
199 clk_disable_unprepare(msm_host->ahb_clk); 241 clk_disable_unprepare(ahb_clk);
200disable_gdsc: 242disable_gdsc:
201 regulator_disable(gdsc_reg); 243 regulator_disable(gdsc_reg);
244put_clk:
245 clk_put(ahb_clk);
202put_gdsc: 246put_gdsc:
203 regulator_put(gdsc_reg); 247 regulator_put(gdsc_reg);
204exit: 248exit:
@@ -295,40 +339,23 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
295static int dsi_clk_init(struct msm_dsi_host *msm_host) 339static int dsi_clk_init(struct msm_dsi_host *msm_host)
296{ 340{
297 struct device *dev = &msm_host->pdev->dev; 341 struct device *dev = &msm_host->pdev->dev;
298 int ret = 0; 342 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
299 343 const struct msm_dsi_config *cfg = cfg_hnd->cfg;
300 msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); 344 int i, ret = 0;
301 if (IS_ERR(msm_host->mdp_core_clk)) { 345
302 ret = PTR_ERR(msm_host->mdp_core_clk); 346 /* get bus clocks */
303 pr_err("%s: Unable to get mdp core clk. ret=%d\n", 347 for (i = 0; i < cfg->num_bus_clks; i++) {
304 __func__, ret); 348 msm_host->bus_clks[i] = devm_clk_get(dev,
305 goto exit; 349 cfg->bus_clk_names[i]);
306 } 350 if (IS_ERR(msm_host->bus_clks[i])) {
307 351 ret = PTR_ERR(msm_host->bus_clks[i]);
308 msm_host->ahb_clk = devm_clk_get(dev, "iface_clk"); 352 pr_err("%s: Unable to get %s, ret = %d\n",
309 if (IS_ERR(msm_host->ahb_clk)) { 353 __func__, cfg->bus_clk_names[i], ret);
310 ret = PTR_ERR(msm_host->ahb_clk); 354 goto exit;
311 pr_err("%s: Unable to get mdss ahb clk. ret=%d\n", 355 }
312 __func__, ret);
313 goto exit;
314 }
315
316 msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
317 if (IS_ERR(msm_host->axi_clk)) {
318 ret = PTR_ERR(msm_host->axi_clk);
319 pr_err("%s: Unable to get axi bus clk. ret=%d\n",
320 __func__, ret);
321 goto exit;
322 }
323
324 msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
325 if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
326 ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
327 pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
328 __func__, ret);
329 goto exit;
330 } 356 }
331 357
358 /* get link and source clocks */
332 msm_host->byte_clk = devm_clk_get(dev, "byte_clk"); 359 msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
333 if (IS_ERR(msm_host->byte_clk)) { 360 if (IS_ERR(msm_host->byte_clk)) {
334 ret = PTR_ERR(msm_host->byte_clk); 361 ret = PTR_ERR(msm_host->byte_clk);
@@ -356,80 +383,85 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
356 goto exit; 383 goto exit;
357 } 384 }
358 385
359 msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src"); 386 msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
360 if (IS_ERR(msm_host->byte_clk_src)) { 387 if (!msm_host->byte_clk_src) {
361 ret = PTR_ERR(msm_host->byte_clk_src); 388 ret = -ENODEV;
362 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret); 389 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
363 msm_host->byte_clk_src = NULL;
364 goto exit; 390 goto exit;
365 } 391 }
366 392
367 msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src"); 393 msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
368 if (IS_ERR(msm_host->pixel_clk_src)) { 394 if (!msm_host->pixel_clk_src) {
369 ret = PTR_ERR(msm_host->pixel_clk_src); 395 ret = -ENODEV;
370 pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret); 396 pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
371 msm_host->pixel_clk_src = NULL;
372 goto exit; 397 goto exit;
373 } 398 }
374 399
400 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
401 msm_host->src_clk = devm_clk_get(dev, "src_clk");
402 if (IS_ERR(msm_host->src_clk)) {
403 ret = PTR_ERR(msm_host->src_clk);
404 pr_err("%s: can't find dsi_src_clk. ret=%d\n",
405 __func__, ret);
406 msm_host->src_clk = NULL;
407 goto exit;
408 }
409
410 msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
411 if (!msm_host->esc_clk_src) {
412 ret = -ENODEV;
413 pr_err("%s: can't get esc_clk_src. ret=%d\n",
414 __func__, ret);
415 goto exit;
416 }
417
418 msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
419 if (!msm_host->dsi_clk_src) {
420 ret = -ENODEV;
421 pr_err("%s: can't get dsi_clk_src. ret=%d\n",
422 __func__, ret);
423 }
424 }
375exit: 425exit:
376 return ret; 426 return ret;
377} 427}
378 428
379static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) 429static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
380{ 430{
381 int ret; 431 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
432 int i, ret;
382 433
383 DBG("id=%d", msm_host->id); 434 DBG("id=%d", msm_host->id);
384 435
385 ret = clk_prepare_enable(msm_host->mdp_core_clk); 436 for (i = 0; i < cfg->num_bus_clks; i++) {
386 if (ret) { 437 ret = clk_prepare_enable(msm_host->bus_clks[i]);
387 pr_err("%s: failed to enable mdp_core_clock, %d\n", 438 if (ret) {
388 __func__, ret); 439 pr_err("%s: failed to enable bus clock %d ret %d\n",
389 goto core_clk_err; 440 __func__, i, ret);
390 } 441 goto err;
391 442 }
392 ret = clk_prepare_enable(msm_host->ahb_clk);
393 if (ret) {
394 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
395 goto ahb_clk_err;
396 }
397
398 ret = clk_prepare_enable(msm_host->axi_clk);
399 if (ret) {
400 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
401 goto axi_clk_err;
402 }
403
404 ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
405 if (ret) {
406 pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
407 __func__, ret);
408 goto misc_ahb_clk_err;
409 } 443 }
410 444
411 return 0; 445 return 0;
446err:
447 for (; i > 0; i--)
448 clk_disable_unprepare(msm_host->bus_clks[i]);
412 449
413misc_ahb_clk_err:
414 clk_disable_unprepare(msm_host->axi_clk);
415axi_clk_err:
416 clk_disable_unprepare(msm_host->ahb_clk);
417ahb_clk_err:
418 clk_disable_unprepare(msm_host->mdp_core_clk);
419core_clk_err:
420 return ret; 450 return ret;
421} 451}
422 452
423static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) 453static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
424{ 454{
455 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
456 int i;
457
425 DBG(""); 458 DBG("");
426 clk_disable_unprepare(msm_host->mmss_misc_ahb_clk); 459
427 clk_disable_unprepare(msm_host->axi_clk); 460 for (i = cfg->num_bus_clks - 1; i >= 0; i--)
428 clk_disable_unprepare(msm_host->ahb_clk); 461 clk_disable_unprepare(msm_host->bus_clks[i]);
429 clk_disable_unprepare(msm_host->mdp_core_clk);
430} 462}
431 463
432static int dsi_link_clk_enable(struct msm_dsi_host *msm_host) 464static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
433{ 465{
434 int ret; 466 int ret;
435 467
@@ -476,11 +508,98 @@ error:
476 return ret; 508 return ret;
477} 509}
478 510
479static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) 511static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
480{ 512{
513 int ret;
514
515 DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
516 msm_host->mode->clock, msm_host->byte_clk_rate,
517 msm_host->esc_clk_rate, msm_host->src_clk_rate);
518
519 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
520 if (ret) {
521 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
522 goto error;
523 }
524
525 ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
526 if (ret) {
527 pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
528 goto error;
529 }
530
531 ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
532 if (ret) {
533 pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
534 goto error;
535 }
536
537 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
538 if (ret) {
539 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
540 goto error;
541 }
542
543 ret = clk_prepare_enable(msm_host->byte_clk);
544 if (ret) {
545 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
546 goto error;
547 }
548
549 ret = clk_prepare_enable(msm_host->esc_clk);
550 if (ret) {
551 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
552 goto esc_clk_err;
553 }
554
555 ret = clk_prepare_enable(msm_host->src_clk);
556 if (ret) {
557 pr_err("%s: Failed to enable dsi src clk\n", __func__);
558 goto src_clk_err;
559 }
560
561 ret = clk_prepare_enable(msm_host->pixel_clk);
562 if (ret) {
563 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
564 goto pixel_clk_err;
565 }
566
567 return 0;
568
569pixel_clk_err:
570 clk_disable_unprepare(msm_host->src_clk);
571src_clk_err:
481 clk_disable_unprepare(msm_host->esc_clk); 572 clk_disable_unprepare(msm_host->esc_clk);
482 clk_disable_unprepare(msm_host->pixel_clk); 573esc_clk_err:
483 clk_disable_unprepare(msm_host->byte_clk); 574 clk_disable_unprepare(msm_host->byte_clk);
575error:
576 return ret;
577}
578
579static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
580{
581 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
582
583 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
584 return dsi_link_clk_enable_6g(msm_host);
585 else
586 return dsi_link_clk_enable_v2(msm_host);
587}
588
589static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
590{
591 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
592
593 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
594 clk_disable_unprepare(msm_host->esc_clk);
595 clk_disable_unprepare(msm_host->pixel_clk);
596 clk_disable_unprepare(msm_host->byte_clk);
597 } else {
598 clk_disable_unprepare(msm_host->pixel_clk);
599 clk_disable_unprepare(msm_host->src_clk);
600 clk_disable_unprepare(msm_host->esc_clk);
601 clk_disable_unprepare(msm_host->byte_clk);
602 }
484} 603}
485 604
486static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable) 605static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
@@ -515,6 +634,7 @@ unlock_ret:
515static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) 634static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
516{ 635{
517 struct drm_display_mode *mode = msm_host->mode; 636 struct drm_display_mode *mode = msm_host->mode;
637 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
518 u8 lanes = msm_host->lanes; 638 u8 lanes = msm_host->lanes;
519 u32 bpp = dsi_get_bpp(msm_host->format); 639 u32 bpp = dsi_get_bpp(msm_host->format);
520 u32 pclk_rate; 640 u32 pclk_rate;
@@ -534,6 +654,47 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
534 654
535 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate); 655 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
536 656
657 msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
658
659 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
660 unsigned int esc_mhz, esc_div;
661 unsigned long byte_mhz;
662
663 msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
664
665 /*
666 * esc clock is byte clock followed by a 4 bit divider,
667 * we need to find an escape clock frequency within the
 668 * MIPI DSI spec range that the maximum divider limit allows.
 669 * We iterate here over escape clock frequencies
 670 * from 20 MHz down to 5 MHz and pick the first one
 671 * that our divider can support
672 */
673
674 byte_mhz = msm_host->byte_clk_rate / 1000000;
675
676 for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
677 esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
678
679 /*
680 * TODO: Ideally, we shouldn't know what sort of divider
681 * is available in mmss_cc, we're just assuming that
682 * it'll always be a 4 bit divider. Need to come up with
683 * a better way here.
684 */
685 if (esc_div >= 1 && esc_div <= 16)
686 break;
687 }
688
689 if (esc_mhz < 5)
690 return -EINVAL;
691
692 msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
693
694 DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
695 msm_host->src_clk_rate);
696 }
697
537 return 0; 698 return 0;
538} 699}
539 700
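As a worked example of the escape-clock search above (illustrative byte clock, not from the patch): with byte_clk_rate = 52 MHz, byte_mhz = 52, and the first candidate esc_mhz = 20 gives esc_div = DIV_ROUND_UP(52, 20) = 3, which already fits the 4-bit divider range of 1..16; the loop stops there and esc_clk_rate = 52 MHz / 3 ≈ 17.3 MHz, inside the 5-20 MHz MIPI window.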
@@ -835,29 +996,46 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
835static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) 996static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
836{ 997{
837 struct drm_device *dev = msm_host->dev; 998 struct drm_device *dev = msm_host->dev;
999 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
838 int ret; 1000 int ret;
839 u32 iova; 1001 u32 iova;
840 1002
841 mutex_lock(&dev->struct_mutex); 1003 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
842 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); 1004 mutex_lock(&dev->struct_mutex);
843 if (IS_ERR(msm_host->tx_gem_obj)) { 1005 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
844 ret = PTR_ERR(msm_host->tx_gem_obj); 1006 if (IS_ERR(msm_host->tx_gem_obj)) {
845 pr_err("%s: failed to allocate gem, %d\n", __func__, ret); 1007 ret = PTR_ERR(msm_host->tx_gem_obj);
846 msm_host->tx_gem_obj = NULL; 1008 pr_err("%s: failed to allocate gem, %d\n",
1009 __func__, ret);
1010 msm_host->tx_gem_obj = NULL;
1011 mutex_unlock(&dev->struct_mutex);
1012 return ret;
1013 }
1014
1015 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
847 mutex_unlock(&dev->struct_mutex); 1016 mutex_unlock(&dev->struct_mutex);
848 return ret; 1017 if (ret) {
849 } 1018 pr_err("%s: failed to get iova, %d\n", __func__, ret);
1019 return ret;
1020 }
850 1021
851 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); 1022 if (iova & 0x07) {
852 if (ret) { 1023 pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
853 pr_err("%s: failed to get iova, %d\n", __func__, ret); 1024 return -EINVAL;
854 return ret; 1025 }
855 }
856 mutex_unlock(&dev->struct_mutex);
857 1026
858 if (iova & 0x07) { 1027 msm_host->tx_size = msm_host->tx_gem_obj->size;
859 pr_err("%s: buf NOT 8 bytes aligned\n", __func__); 1028 } else {
860 return -EINVAL; 1029 msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1030 &msm_host->tx_buf_paddr, GFP_KERNEL);
1031 if (!msm_host->tx_buf) {
1032 ret = -ENOMEM;
1033 pr_err("%s: failed to allocate tx buf, %d\n",
1034 __func__, ret);
1035 return ret;
1036 }
1037
1038 msm_host->tx_size = size;
861 } 1039 }
862 1040
863 return 0; 1041 return 0;
@@ -874,14 +1052,19 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
874 msm_host->tx_gem_obj = NULL; 1052 msm_host->tx_gem_obj = NULL;
875 mutex_unlock(&dev->struct_mutex); 1053 mutex_unlock(&dev->struct_mutex);
876 } 1054 }
1055
1056 if (msm_host->tx_buf)
1057 dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1058 msm_host->tx_buf_paddr);
877} 1059}
878 1060
879/* 1061/*
880 * prepare cmd buffer to be txed 1062 * prepare cmd buffer to be txed
881 */ 1063 */
882static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem, 1064static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
883 const struct mipi_dsi_msg *msg) 1065 const struct mipi_dsi_msg *msg)
884{ 1066{
1067 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
885 struct mipi_dsi_packet packet; 1068 struct mipi_dsi_packet packet;
886 int len; 1069 int len;
887 int ret; 1070 int ret;
@@ -894,17 +1077,20 @@ static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
894 } 1077 }
895 len = (packet.size + 3) & (~0x3); 1078 len = (packet.size + 3) & (~0x3);
896 1079
897 if (len > tx_gem->size) { 1080 if (len > msm_host->tx_size) {
898 pr_err("%s: packet size is too big\n", __func__); 1081 pr_err("%s: packet size is too big\n", __func__);
899 return -EINVAL; 1082 return -EINVAL;
900 } 1083 }
901 1084
902 data = msm_gem_vaddr(tx_gem); 1085 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
903 1086 data = msm_gem_vaddr(msm_host->tx_gem_obj);
904 if (IS_ERR(data)) { 1087 if (IS_ERR(data)) {
905 ret = PTR_ERR(data); 1088 ret = PTR_ERR(data);
906 pr_err("%s: get vaddr failed, %d\n", __func__, ret); 1089 pr_err("%s: get vaddr failed, %d\n", __func__, ret);
907 return ret; 1090 return ret;
1091 }
1092 } else {
1093 data = msm_host->tx_buf;
908 } 1094 }
909 1095
910 /* MSM specific command format in memory */ 1096 /* MSM specific command format in memory */
@@ -970,17 +1156,21 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
970 return msg->rx_len; 1156 return msg->rx_len;
971} 1157}
972 1158
973
974static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) 1159static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
975{ 1160{
1161 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
976 int ret; 1162 int ret;
977 u32 iova; 1163 u32 dma_base;
978 bool triggered; 1164 bool triggered;
979 1165
980 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova); 1166 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
981 if (ret) { 1167 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
982 pr_err("%s: failed to get iova: %d\n", __func__, ret); 1168 if (ret) {
983 return ret; 1169 pr_err("%s: failed to get iova: %d\n", __func__, ret);
1170 return ret;
1171 }
1172 } else {
1173 dma_base = msm_host->tx_buf_paddr;
984 } 1174 }
985 1175
986 reinit_completion(&msm_host->dma_comp); 1176 reinit_completion(&msm_host->dma_comp);
@@ -988,7 +1178,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
988 dsi_wait4video_eng_busy(msm_host); 1178 dsi_wait4video_eng_busy(msm_host);
989 1179
990 triggered = msm_dsi_manager_cmd_xfer_trigger( 1180 triggered = msm_dsi_manager_cmd_xfer_trigger(
991 msm_host->id, iova, len); 1181 msm_host->id, dma_base, len);
992 if (triggered) { 1182 if (triggered) {
993 ret = wait_for_completion_timeout(&msm_host->dma_comp, 1183 ret = wait_for_completion_timeout(&msm_host->dma_comp,
994 msecs_to_jiffies(200)); 1184 msecs_to_jiffies(200));
@@ -1060,7 +1250,7 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1060 int bllp_len = msm_host->mode->hdisplay * 1250 int bllp_len = msm_host->mode->hdisplay *
1061 dsi_get_bpp(msm_host->format) / 8; 1251 dsi_get_bpp(msm_host->format) / 8;
1062 1252
1063 len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg); 1253 len = dsi_cmd_dma_add(msm_host, msg);
1064 if (!len) { 1254 if (!len) {
1065 pr_err("%s: failed to add cmd type = 0x%x\n", 1255 pr_err("%s: failed to add cmd type = 0x%x\n",
1066 __func__, msg->type); 1256 __func__, msg->type);
@@ -1383,6 +1573,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1383 1573
1384 msm_host->device_node = device_node; 1574 msm_host->device_node = device_node;
1385 1575
1576 if (of_property_read_bool(np, "syscon-sfpb")) {
1577 msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1578 "syscon-sfpb");
1579 if (IS_ERR(msm_host->sfpb)) {
1580 dev_err(dev, "%s: failed to get sfpb regmap\n",
1581 __func__);
1582 return PTR_ERR(msm_host->sfpb);
1583 }
1584 }
1585
1386 return 0; 1586 return 0;
1387} 1587}
1388 1588
@@ -1408,12 +1608,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1408 goto fail; 1608 goto fail;
1409 } 1609 }
1410 1610
1411 ret = dsi_clk_init(msm_host);
1412 if (ret) {
1413 pr_err("%s: unable to initialize dsi clks\n", __func__);
1414 goto fail;
1415 }
1416
1417 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL"); 1611 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1418 if (IS_ERR(msm_host->ctrl_base)) { 1612 if (IS_ERR(msm_host->ctrl_base)) {
1419 pr_err("%s: unable to map Dsi ctrl base\n", __func__); 1613 pr_err("%s: unable to map Dsi ctrl base\n", __func__);
@@ -1437,6 +1631,12 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1437 goto fail; 1631 goto fail;
1438 } 1632 }
1439 1633
1634 ret = dsi_clk_init(msm_host);
1635 if (ret) {
1636 pr_err("%s: unable to initialize dsi clks\n", __func__);
1637 goto fail;
1638 }
1639
1440 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); 1640 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1441 if (!msm_host->rx_buf) { 1641 if (!msm_host->rx_buf) {
1442 pr_err("%s: alloc rx temp buf failed\n", __func__); 1642 pr_err("%s: alloc rx temp buf failed\n", __func__);
@@ -1750,11 +1950,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1750 return ret; 1950 return ret;
1751} 1951}
1752 1952
1753void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len) 1953void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
1954 u32 len)
1754{ 1955{
1755 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1956 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1756 1957
1757 dsi_write(msm_host, REG_DSI_DMA_BASE, iova); 1958 dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
1758 dsi_write(msm_host, REG_DSI_DMA_LEN, len); 1959 dsi_write(msm_host, REG_DSI_DMA_LEN, len);
1759 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1); 1960 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
1760 1961
@@ -1766,6 +1967,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
1766 struct msm_dsi_pll *src_pll) 1967 struct msm_dsi_pll *src_pll)
1767{ 1968{
1768 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1969 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1970 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1769 struct clk *byte_clk_provider, *pixel_clk_provider; 1971 struct clk *byte_clk_provider, *pixel_clk_provider;
1770 int ret; 1972 int ret;
1771 1973
@@ -1791,6 +1993,22 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
1791 goto exit; 1993 goto exit;
1792 } 1994 }
1793 1995
1996 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
1997 ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
1998 if (ret) {
1999 pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2000 __func__, ret);
2001 goto exit;
2002 }
2003
2004 ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2005 if (ret) {
2006 pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2007 __func__, ret);
2008 goto exit;
2009 }
2010 }
2011
1794exit: 2012exit:
1795 return ret; 2013 return ret;
1796} 2014}
@@ -1828,6 +2046,20 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
1828 return 0; 2046 return 0;
1829} 2047}
1830 2048
2049static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2050{
2051 enum sfpb_ahb_arb_master_port_en en;
2052
2053 if (!msm_host->sfpb)
2054 return;
2055
2056 en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2057
2058 regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2059 SFPB_GPREG_MASTER_PORT_EN__MASK,
2060 SFPB_GPREG_MASTER_PORT_EN(en));
2061}
2062
1831int msm_dsi_host_power_on(struct mipi_dsi_host *host) 2063int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1832{ 2064{
1833 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2065 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -1840,6 +2072,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1840 goto unlock_ret; 2072 goto unlock_ret;
1841 } 2073 }
1842 2074
2075 msm_dsi_sfpb_config(msm_host, true);
2076
1843 ret = dsi_calc_clk_rate(msm_host); 2077 ret = dsi_calc_clk_rate(msm_host);
1844 if (ret) { 2078 if (ret) {
1845 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2079 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
@@ -1862,7 +2096,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1862 dsi_phy_sw_reset(msm_host); 2096 dsi_phy_sw_reset(msm_host);
1863 ret = msm_dsi_manager_phy_enable(msm_host->id, 2097 ret = msm_dsi_manager_phy_enable(msm_host->id,
1864 msm_host->byte_clk_rate * 8, 2098 msm_host->byte_clk_rate * 8,
1865 clk_get_rate(msm_host->esc_clk), 2099 msm_host->esc_clk_rate,
1866 &clk_pre, &clk_post); 2100 &clk_pre, &clk_post);
1867 dsi_bus_clk_disable(msm_host); 2101 dsi_bus_clk_disable(msm_host);
1868 if (ret) { 2102 if (ret) {
@@ -1927,6 +2161,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
1927 2161
1928 dsi_host_regulator_disable(msm_host); 2162 dsi_host_regulator_disable(msm_host);
1929 2163
2164 msm_dsi_sfpb_config(msm_host, false);
2165
1930 DBG("-"); 2166 DBG("-");
1931 2167
1932 msm_host->power_on = false; 2168 msm_host->power_on = false;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0455ff75074a..58ba7ec17f51 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -774,7 +774,7 @@ restore_host0:
774 return ret; 774 return ret;
775} 775}
776 776
777bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len) 777bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
778{ 778{
779 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 779 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
780 struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0); 780 struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
@@ -784,9 +784,9 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
784 return false; 784 return false;
785 785
786 if (IS_SYNC_NEEDED() && msm_dsi0) 786 if (IS_SYNC_NEEDED() && msm_dsi0)
787 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len); 787 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
788 788
789 msm_dsi_host_cmd_xfer_commit(host, iova, len); 789 msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
790 790
791 return true; 791 return true;
792} 792}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f1f955f571fa..91a95fb04a4a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -277,6 +277,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
277 { .compatible = "qcom,dsi-phy-20nm", 277 { .compatible = "qcom,dsi-phy-20nm",
278 .data = &dsi_phy_20nm_cfgs }, 278 .data = &dsi_phy_20nm_cfgs },
279#endif 279#endif
280#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
281 { .compatible = "qcom,dsi-phy-28nm-8960",
282 .data = &dsi_phy_28nm_8960_cfgs },
283#endif
280 {} 284 {}
281}; 285};
282 286
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0456b253239f..0d54ed00386d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -43,6 +43,7 @@ struct msm_dsi_phy_cfg {
43extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; 43extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
44extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; 44extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
45extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; 45extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
46extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
46 47
47struct msm_dsi_dphy_timing { 48struct msm_dsi_dphy_timing {
48 u32 clk_pre; 49 u32 clk_pre;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
new file mode 100644
index 000000000000..197b039ca1f1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi_phy.h"
15#include "dsi.xml.h"
16
17static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
18 struct msm_dsi_dphy_timing *timing)
19{
20 void __iomem *base = phy->base;
21
22 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
23 DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
24 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
25 DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
26 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
27 DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
28 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
29 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
30 DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
31 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
32 DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
33 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
34 DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
35 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
36 DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
37 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
38 DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
39 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
40 DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
41 DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
42 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
43 DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
44 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
45 DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
46}
47
48static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
49{
50 void __iomem *base = phy->reg_base;
51
52 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
53 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
54 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
55 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
56 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
57 0x100);
58}
59
60static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
61{
62 void __iomem *base = phy->reg_base;
63
64 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
65 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
66 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
67 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
68 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
69}
70
71static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
72{
73 void __iomem *base = phy->reg_base;
74 u32 status;
75 int i = 5000;
76
77 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
78 0x3);
79
80 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
81 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
82 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
83 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
84 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
85
86 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
87 usleep_range(5000, 6000);
88 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
89
90 do {
91 status = dsi_phy_read(base +
92 REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
93
94 if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
95 break;
96
97 udelay(1);
98 } while (--i > 0);
99}
100
101static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
102{
103 void __iomem *base = phy->base;
104 int i;
105
106 for (i = 0; i < 4; i++) {
107 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
108 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
109 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
110 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
111 0x00);
112 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
113 0x01);
114 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
115 0x66);
116 }
117
118 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
119 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
120 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
121 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
122 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
123 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
124}
125
126static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
127 const unsigned long bit_rate, const unsigned long esc_rate)
128{
129 struct msm_dsi_dphy_timing *timing = &phy->timing;
130 void __iomem *base = phy->base;
131
132 DBG("");
133
134 if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
135 dev_err(&phy->pdev->dev,
136 "%s: D-PHY timing calculation failed\n", __func__);
137 return -EINVAL;
138 }
139
140 dsi_28nm_phy_regulator_init(phy);
141
142 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
143
144 /* strength control */
145 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
146 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
147 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
148
149 /* phy ctrl */
150 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
151 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
152 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
153 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
154
155 dsi_28nm_phy_regulator_ctrl(phy);
156
157 dsi_28nm_phy_calibration(phy);
158
159 dsi_28nm_phy_lane_config(phy);
160
161 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
162 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
163 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
164 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
165
166 dsi_28nm_dphy_set_timing(phy, timing);
167
168 return 0;
169}
170
171static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
172{
173 dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
174
175 /*
 176	 * Wait for the register writes to complete in order to
177 * ensure that the phy is completely disabled
178 */
179 wmb();
180}
181
182const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
183 .type = MSM_DSI_PHY_28NM_8960,
184 .src_pll_truthtable = { {true, true}, {false, true} },
185 .reg_cfg = {
186 .num = 1,
187 .regs = {
188 {"vddio", 1800000, 1800000, 100000, 100},
189 },
190 },
191 .ops = {
192 .enable = dsi_28nm_phy_enable,
193 .disable = dsi_28nm_phy_disable,
194 },
195};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 5104fc9f9a53..5cd438f91afe 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -151,6 +151,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
151 case MSM_DSI_PHY_28NM_LP: 151 case MSM_DSI_PHY_28NM_LP:
152 pll = msm_dsi_pll_28nm_init(pdev, type, id); 152 pll = msm_dsi_pll_28nm_init(pdev, type, id);
153 break; 153 break;
154 case MSM_DSI_PHY_28NM_8960:
155 pll = msm_dsi_pll_28nm_8960_init(pdev, id);
156 break;
154 default: 157 default:
155 pll = ERR_PTR(-ENXIO); 158 pll = ERR_PTR(-ENXIO);
156 break; 159 break;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 063caa2c5740..80b6038334a6 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -93,6 +93,16 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
93 return ERR_PTR(-ENODEV); 93 return ERR_PTR(-ENODEV);
94} 94}
95#endif 95#endif
96#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
97struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
98 int id);
99#else
100struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
101 int id)
102{
103 return ERR_PTR(-ENODEV);
104}
105#endif
96 106
97#endif /* __DSI_PLL_H__ */ 107#endif /* __DSI_PLL_H__ */
98 108
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
new file mode 100644
index 000000000000..38c90e1eb002
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -0,0 +1,533 @@
1/*
2 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk-provider.h>
15
16#include "dsi_pll.h"
17#include "dsi.xml.h"
18
19/*
20 * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
21 *
22 *
23 * +------+
24 * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
25 * F * byte_clk | +------+
26 * | bit clock divider (F / 8)
27 * |
28 * | +------+
 29 * o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
30 * | +------+ | (sets parent rate)
31 * | byte clock divider (F) |
32 * | |
33 * | o---> To esc RCG
34 * | (doesn't set parent rate)
35 * |
36 * | +------+
 37 * o-----| DIV3 |----dsi1pll------o---> To dsi RCG
38 * +------+ | (sets parent rate)
39 * dsi clock divider (F * magic) |
40 * |
 41 * o---> To pixel RCG
42 * (doesn't set parent rate)
43 */
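/*
 * Illustrative numbers (assuming a hypothetical 500 Mbit/s per-lane link):
 * the byte clock is 500 MHz / 8 = 62.5 MHz, get_vco_mul_factor() below
 * picks F = 16, and the VCO locks at 62.5 MHz * 16 = 1 GHz. DIV2 then
 * divides by F to recover the 62.5 MHz byte clock, while DIV1 divides by
 * F / 8 = 2 to produce the 500 MHz bit clock.
 */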
44
45#define POLL_MAX_READS 8000
46#define POLL_TIMEOUT_US 1
47
48#define NUM_PROVIDED_CLKS 2
49
50#define VCO_REF_CLK_RATE 27000000
51#define VCO_MIN_RATE 600000000
52#define VCO_MAX_RATE 1200000000
53
54#define DSI_BYTE_PLL_CLK 0
55#define DSI_PIXEL_PLL_CLK 1
56
57#define VCO_PREF_DIV_RATIO 27
58
59struct pll_28nm_cached_state {
60 unsigned long vco_rate;
61 u8 postdiv3;
62 u8 postdiv2;
63 u8 postdiv1;
64};
65
66struct clk_bytediv {
67 struct clk_hw hw;
68 void __iomem *reg;
69};
70
71struct dsi_pll_28nm {
72 struct msm_dsi_pll base;
73
74 int id;
75 struct platform_device *pdev;
76 void __iomem *mmio;
77
78 /* custom byte clock divider */
79 struct clk_bytediv *bytediv;
80
81 /* private clocks: */
82 struct clk *clks[NUM_DSI_CLOCKS_MAX];
83 u32 num_clks;
84
85 /* clock-provider: */
86 struct clk *provided_clks[NUM_PROVIDED_CLKS];
87 struct clk_onecell_data clk_data;
88
89 struct pll_28nm_cached_state cached_state;
90};
91
92#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
93
94static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
95 int nb_tries, int timeout_us)
96{
97 bool pll_locked = false;
98 u32 val;
99
100 while (nb_tries--) {
101 val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
102 pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
103
104 if (pll_locked)
105 break;
106
107 udelay(timeout_us);
108 }
109 DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
110
111 return pll_locked;
112}
113
114/*
115 * Clock Callbacks
116 */
117static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
118 unsigned long parent_rate)
119{
120 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
121 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
122 void __iomem *base = pll_28nm->mmio;
123 u32 val, temp, fb_divider;
124
125 DBG("rate=%lu, parent's=%lu", rate, parent_rate);
126
127 temp = rate / 10;
128 val = VCO_REF_CLK_RATE / 10;
129 fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
130 fb_divider = fb_divider / 2 - 1;
131 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
132 fb_divider & 0xff);
133
134 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
135
136 val |= (fb_divider >> 8) & 0x07;
137
138 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
139 val);
140
141 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
142
143 val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
144
145 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
146 val);
147
148 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
149 0xf);
150
151 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
152 val |= 0x7 << 4;
153 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
154 val);
155
156 return 0;
157}
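/*
 * Worked example (hypothetical 1 GHz VCO from the 27 MHz reference):
 * fb_divider = ((1 GHz / 10) * 27) / (27 MHz / 10) = 1000, programmed as
 * 1000 / 2 - 1 = 499 (0x1f3) with the low byte in CTRL_1 and bits [10:8]
 * in CTRL_2; dsi_pll_28nm_clk_recalc_rate() below inverts this:
 * (27 MHz / 27) * (499 + 1) * 2 = 1 GHz.
 */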
158
159static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
160{
161 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
162 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
163
164 return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
165 POLL_TIMEOUT_US);
166}
167
168static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
169 unsigned long parent_rate)
170{
171 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
172 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
173 void __iomem *base = pll_28nm->mmio;
174 unsigned long vco_rate;
175 u32 status, fb_divider, temp, ref_divider;
176
177 VERB("parent_rate=%lu", parent_rate);
178
179 status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
180
181 if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
182 fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
183 fb_divider &= 0xff;
184 temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
185 fb_divider = (temp << 8) | fb_divider;
186 fb_divider += 1;
187
188 ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
189 ref_divider &= 0x3f;
190 ref_divider += 1;
191
192 /* multiply by 2 */
193 vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
194 } else {
195 vco_rate = 0;
196 }
197
198 DBG("returning vco rate = %lu", vco_rate);
199
200 return vco_rate;
201}
202
203static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
204 .round_rate = msm_dsi_pll_helper_clk_round_rate,
205 .set_rate = dsi_pll_28nm_clk_set_rate,
206 .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
207 .prepare = msm_dsi_pll_helper_clk_prepare,
208 .unprepare = msm_dsi_pll_helper_clk_unprepare,
209 .is_enabled = dsi_pll_28nm_clk_is_enabled,
210};
211
212/*
 213 * Custom byte clock divider clk_ops
214 *
215 * This clock is the entry point to configuring the PLL. The user (dsi host)
216 * will set this clock's rate to the desired byte clock rate. The VCO lock
217 * frequency is a multiple of the byte clock rate. The multiplication factor
218 * (shown as F in the diagram above) is a function of the byte clock rate.
219 *
220 * This custom divider clock ensures that its parent (VCO) is set to the
221 * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
222 * accordingly
223 */
224#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
225
226static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
227 unsigned long parent_rate)
228{
229 struct clk_bytediv *bytediv = to_clk_bytediv(hw);
230 unsigned int div;
231
232 div = pll_read(bytediv->reg) & 0xff;
233
234 return parent_rate / (div + 1);
235}
236
 237/* find the multiplication factor (w.r.t. byte clock) at which the VCO should be set */
238static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
239{
240 unsigned long bit_mhz;
241
 242	/* convert to bit clock in MHz */
243 bit_mhz = (byte_clk_rate * 8) / 1000000;
244
245 if (bit_mhz < 125)
246 return 64;
247 else if (bit_mhz < 250)
248 return 32;
249 else if (bit_mhz < 600)
250 return 16;
251 else
252 return 8;
253}
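/*
 * E.g. a 30 MHz byte clock is a 240 MHz bit clock, which falls in the
 * 125-250 MHz bracket, so F = 32 and the VCO is asked for
 * 30 MHz * 32 = 960 MHz, inside the 600-1200 MHz VCO range.
 */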
254
255static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
256 unsigned long *prate)
257{
258 unsigned long best_parent;
259 unsigned int factor;
260
261 factor = get_vco_mul_factor(rate);
262
263 best_parent = rate * factor;
264 *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
265
266 return *prate / factor;
267}
268
269static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
270 unsigned long parent_rate)
271{
272 struct clk_bytediv *bytediv = to_clk_bytediv(hw);
273 u32 val;
274 unsigned int factor;
275
276 factor = get_vco_mul_factor(rate);
277
278 val = pll_read(bytediv->reg);
279 val |= (factor - 1) & 0xff;
280 pll_write(bytediv->reg, val);
281
282 return 0;
283}
284
285/* Our special byte clock divider ops */
286static const struct clk_ops clk_bytediv_ops = {
287 .round_rate = clk_bytediv_round_rate,
288 .set_rate = clk_bytediv_set_rate,
289 .recalc_rate = clk_bytediv_recalc_rate,
290};
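/*
 * Sketch of the intended call flow (hypothetical 62.5 MHz byte clock):
 * the DSI host calls clk_set_rate() on dsi1pllbyte; since the clock is
 * registered with CLK_SET_RATE_PARENT, the framework first invokes
 * clk_bytediv_round_rate(), which picks F = 16 and rounds the VCO to
 * 1 GHz, then clk_bytediv_set_rate() programs POSTDIV2 with F - 1.
 */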
291
292/*
293 * PLL Callbacks
294 */
295static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
296{
297 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
298 struct device *dev = &pll_28nm->pdev->dev;
299 void __iomem *base = pll_28nm->mmio;
300 bool locked;
301 unsigned int bit_div, byte_div;
302 int max_reads = 1000, timeout_us = 100;
303 u32 val;
304
305 DBG("id=%d", pll_28nm->id);
306
307 /*
308 * before enabling the PLL, configure the bit clock divider since we
 309	 * don't expose it as a clock to the outside world:
310 * 1: read back the byte clock divider that should already be set
311 * 2: divide by 8 to get bit clock divider
312 * 3: write it to POSTDIV1
313 */
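	/*
	 * For instance (hypothetical values): if CTRL_9 reads back 15,
	 * byte_div is 16 and bit_div is 2, so the low nibble of CTRL_8
	 * (POSTDIV1) is written with 1.
	 */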
314 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
315 byte_div = val + 1;
316 bit_div = byte_div / 8;
317
318 val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
319 val &= ~0xf;
320 val |= (bit_div - 1);
321 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
322
323 /* enable the PLL */
324 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
325 DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
326
327 locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
328
329 if (unlikely(!locked))
330 dev_err(dev, "DSI PLL lock failed\n");
331 else
332 DBG("DSI PLL lock success");
333
334 return locked ? 0 : -EINVAL;
335}
336
337static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
338{
339 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
340
341 DBG("id=%d", pll_28nm->id);
342 pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
343}
344
345static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
346{
347 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
348 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
349 void __iomem *base = pll_28nm->mmio;
350
351 cached_state->postdiv3 =
352 pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
353 cached_state->postdiv2 =
354 pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
355 cached_state->postdiv1 =
356 pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
357
358 cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
359}
360
361static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
362{
363 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
364 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
365 void __iomem *base = pll_28nm->mmio;
366 int ret;
367
368 ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
369 cached_state->vco_rate, 0);
370 if (ret) {
371 dev_err(&pll_28nm->pdev->dev,
372 "restore vco rate failed. ret=%d\n", ret);
373 return ret;
374 }
375
376 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
377 cached_state->postdiv3);
378 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
379 cached_state->postdiv2);
380 pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
381 cached_state->postdiv1);
382
383 return 0;
384}
385
386static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
387 struct clk **byte_clk_provider,
388 struct clk **pixel_clk_provider)
389{
390 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
391
392 if (byte_clk_provider)
393 *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
394 if (pixel_clk_provider)
395 *pixel_clk_provider =
396 pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
397
398 return 0;
399}
400
401static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
402{
403 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
404
405 msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
406 pll_28nm->clks, pll_28nm->num_clks);
407}
408
409static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
410{
411 char *clk_name, *parent_name, *vco_name;
412 struct clk_init_data vco_init = {
413 .parent_names = (const char *[]){ "pxo" },
414 .num_parents = 1,
415 .ops = &clk_ops_dsi_pll_28nm_vco,
416 };
417 struct device *dev = &pll_28nm->pdev->dev;
418 struct clk **clks = pll_28nm->clks;
419 struct clk **provided_clks = pll_28nm->provided_clks;
420 struct clk_bytediv *bytediv;
421 struct clk_init_data bytediv_init = { };
422 int ret, num = 0;
423
424 DBG("%d", pll_28nm->id);
425
426 bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
427 if (!bytediv)
428 return -ENOMEM;
429
430 vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
431 if (!vco_name)
432 return -ENOMEM;
433
434 parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
435 if (!parent_name)
436 return -ENOMEM;
437
438 clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
439 if (!clk_name)
440 return -ENOMEM;
441
442 pll_28nm->bytediv = bytediv;
443
444 snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
445 vco_init.name = vco_name;
446
447 pll_28nm->base.clk_hw.init = &vco_init;
448
449 clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
450
451 /* prepare and register bytediv */
452 bytediv->hw.init = &bytediv_init;
453 bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
454
455 snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
456 snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
457
458 bytediv_init.name = clk_name;
459 bytediv_init.ops = &clk_bytediv_ops;
460 bytediv_init.flags = CLK_SET_RATE_PARENT;
461 bytediv_init.parent_names = (const char * const *) &parent_name;
462 bytediv_init.num_parents = 1;
463
464 /* DIV2 */
465 clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
466 clk_register(dev, &bytediv->hw);
467
468 snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
469 /* DIV3 */
470 clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
471 clk_register_divider(dev, clk_name,
472 parent_name, 0, pll_28nm->mmio +
473 REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
474 0, 8, 0, NULL);
475
476 pll_28nm->num_clks = num;
477
478 pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
479 pll_28nm->clk_data.clks = provided_clks;
480
481 ret = of_clk_add_provider(dev->of_node,
482 of_clk_src_onecell_get, &pll_28nm->clk_data);
483 if (ret) {
484 dev_err(dev, "failed to register clk provider: %d\n", ret);
485 return ret;
486 }
487
488 return 0;
489}
490
491struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
492 int id)
493{
494 struct dsi_pll_28nm *pll_28nm;
495 struct msm_dsi_pll *pll;
496 int ret;
497
498 if (!pdev)
499 return ERR_PTR(-ENODEV);
500
501 pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
502 if (!pll_28nm)
503 return ERR_PTR(-ENOMEM);
504
505 pll_28nm->pdev = pdev;
506 pll_28nm->id = id + 1;
507
508 pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
509 if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
510 dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
511 return ERR_PTR(-ENOMEM);
512 }
513
514 pll = &pll_28nm->base;
515 pll->min_rate = VCO_MIN_RATE;
516 pll->max_rate = VCO_MAX_RATE;
517 pll->get_provider = dsi_pll_28nm_get_provider;
518 pll->destroy = dsi_pll_28nm_destroy;
519 pll->disable_seq = dsi_pll_28nm_disable_seq;
520 pll->save_state = dsi_pll_28nm_save_state;
521 pll->restore_state = dsi_pll_28nm_restore_state;
522
523 pll->en_seq_cnt = 1;
524 pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
525
526 ret = pll_28nm_register(pll_28nm);
527 if (ret) {
528 dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
529 return ERR_PTR(ret);
530 }
531
532 return pll;
533}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1f4a95eeb348..9a0989c0b4de 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -17,6 +17,8 @@
17 */ 17 */
18 18
19#include <linux/of_irq.h> 19#include <linux/of_irq.h>
20#include <linux/of_gpio.h>
21
20#include "hdmi.h" 22#include "hdmi.h"
21 23
22void hdmi_set_mode(struct hdmi *hdmi, bool power_on) 24void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -322,8 +324,6 @@ fail:
322 * The hdmi device: 324 * The hdmi device:
323 */ 325 */
324 326
325#include <linux/of_gpio.h>
326
327#define HDMI_CFG(item, entry) \ 327#define HDMI_CFG(item, entry) \
328 .item ## _names = item ##_names_ ## entry, \ 328 .item ## _names = item ##_names_ ## entry, \
329 .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) 329 .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
@@ -388,17 +388,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
388 .hpd_freq = hpd_clk_freq_8x74, 388 .hpd_freq = hpd_clk_freq_8x74,
389}; 389};
390 390
391static const struct of_device_id dt_match[] = {
392 { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
393 { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
394 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
395 { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
396 { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
397 { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
398 {}
399};
400
401#ifdef CONFIG_OF
402static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) 391static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
403{ 392{
404 int gpio = of_get_named_gpio(of_node, name, 0); 393 int gpio = of_get_named_gpio(of_node, name, 0);
@@ -413,7 +402,6 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
413 } 402 }
414 return gpio; 403 return gpio;
415} 404}
416#endif
417 405
418static int hdmi_bind(struct device *dev, struct device *master, void *data) 406static int hdmi_bind(struct device *dev, struct device *master, void *data)
419{ 407{
@@ -421,16 +409,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
421 struct msm_drm_private *priv = drm->dev_private; 409 struct msm_drm_private *priv = drm->dev_private;
422 static struct hdmi_platform_config *hdmi_cfg; 410 static struct hdmi_platform_config *hdmi_cfg;
423 struct hdmi *hdmi; 411 struct hdmi *hdmi;
424#ifdef CONFIG_OF
425 struct device_node *of_node = dev->of_node; 412 struct device_node *of_node = dev->of_node;
426 const struct of_device_id *match;
427 413
428 match = of_match_node(dt_match, of_node); 414 hdmi_cfg = (struct hdmi_platform_config *)
429 if (match && match->data) { 415 of_device_get_match_data(dev);
430 hdmi_cfg = (struct hdmi_platform_config *)match->data; 416 if (!hdmi_cfg) {
431 DBG("hdmi phy: %s", match->compatible); 417 dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
432 } else {
433 dev_err(dev, "unknown phy: %s\n", of_node->name);
434 return -ENXIO; 418 return -ENXIO;
435 } 419 }
436 420
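For reference, a minimal sketch of the of_device_get_match_data() lookup
pattern this hunk adopts (the driver and config names are hypothetical):

	#include <linux/of_device.h>

	struct foo_config { unsigned long max_clk; };

	static int foo_bind(struct device *dev)
	{
		const struct foo_config *cfg;

		/* Returns the .data of the matched of_device_id, or NULL. */
		cfg = of_device_get_match_data(dev);
		if (!cfg)
			return -ENXIO;

		dev->platform_data = (void *)cfg;
		return 0;
	}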
@@ -443,55 +427,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
443 hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); 427 hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
444 hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); 428 hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
445 429
446#else
447 static struct hdmi_platform_config config = {};
448 static const char *hpd_clk_names[] = {
449 "core_clk", "master_iface_clk", "slave_iface_clk",
450 };
451 if (cpu_is_apq8064()) {
452 static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
453 config.phy_init = hdmi_phy_8960_init;
454 config.hpd_reg_names = hpd_reg_names;
455 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
456 config.hpd_clk_names = hpd_clk_names;
457 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
458 config.ddc_clk_gpio = 70;
459 config.ddc_data_gpio = 71;
460 config.hpd_gpio = 72;
461 config.mux_en_gpio = -1;
462 config.mux_sel_gpio = -1;
463 } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
464 static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
465 config.phy_init = hdmi_phy_8960_init;
466 config.hpd_reg_names = hpd_reg_names;
467 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
468 config.hpd_clk_names = hpd_clk_names;
469 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
470 config.ddc_clk_gpio = 100;
471 config.ddc_data_gpio = 101;
472 config.hpd_gpio = 102;
473 config.mux_en_gpio = -1;
474 config.mux_sel_gpio = -1;
475 } else if (cpu_is_msm8x60()) {
476 static const char *hpd_reg_names[] = {
477 "8901_hdmi_mvs", "8901_mpp0"
478 };
479 config.phy_init = hdmi_phy_8x60_init;
480 config.hpd_reg_names = hpd_reg_names;
481 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
482 config.hpd_clk_names = hpd_clk_names;
483 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
484 config.ddc_clk_gpio = 170;
485 config.ddc_data_gpio = 171;
486 config.hpd_gpio = 172;
487 config.mux_en_gpio = -1;
488 config.mux_sel_gpio = -1;
489 }
490 config.mmio_name = "hdmi_msm_hdmi_addr";
491 config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
492
493 hdmi_cfg = &config;
494#endif
495 dev->platform_data = hdmi_cfg; 430 dev->platform_data = hdmi_cfg;
496 431
497 hdmi = hdmi_init(to_platform_device(dev)); 432 hdmi = hdmi_init(to_platform_device(dev));
@@ -529,6 +464,16 @@ static int hdmi_dev_remove(struct platform_device *pdev)
529 return 0; 464 return 0;
530} 465}
531 466
467static const struct of_device_id dt_match[] = {
468 { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
469 { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
470 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
471 { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
472 { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
473 { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
474 {}
475};
476
532static struct platform_driver hdmi_driver = { 477static struct platform_driver hdmi_driver = {
533 .probe = hdmi_dev_probe, 478 .probe = hdmi_dev_probe,
534 .remove = hdmi_dev_remove, 479 .remove = hdmi_dev_remove,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..28df397c3b04 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -678,7 +678,8 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
678 drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 678 drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
679 "unref cursor", unref_cursor_worker); 679 "unref cursor", unref_cursor_worker);
680 680
681 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs); 681 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
682 NULL);
682 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 683 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
683 plane->crtc = crtc; 684 plane->crtc = crtc;
684 685
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
new file mode 100644
index 000000000000..2f57e9453b67
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2014, Inforce Computing. All rights reserved.
4 *
5 * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "mdp4_kms.h"
21
22#include "drm_crtc.h"
23#include "drm_crtc_helper.h"
24
25struct mdp4_dsi_encoder {
26 struct drm_encoder base;
27 struct drm_panel *panel;
28 bool enabled;
29};
30#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
31
32static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
33{
34 struct msm_drm_private *priv = encoder->dev->dev_private;
35 return to_mdp4_kms(to_mdp_kms(priv->kms));
36}
37
38static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
39{
40 struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
41
42 drm_encoder_cleanup(encoder);
43 kfree(mdp4_dsi_encoder);
44}
45
46static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
47 .destroy = mdp4_dsi_encoder_destroy,
48};
49
50static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
51 const struct drm_display_mode *mode,
52 struct drm_display_mode *adjusted_mode)
53{
54 return true;
55}
56
57static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
58 struct drm_display_mode *mode,
59 struct drm_display_mode *adjusted_mode)
60{
61 struct mdp4_kms *mdp4_kms = get_kms(encoder);
62 uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
63 uint32_t display_v_start, display_v_end;
64 uint32_t hsync_start_x, hsync_end_x;
65
66 mode = adjusted_mode;
67
68 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
69 mode->base.id, mode->name,
70 mode->vrefresh, mode->clock,
71 mode->hdisplay, mode->hsync_start,
72 mode->hsync_end, mode->htotal,
73 mode->vdisplay, mode->vsync_start,
74 mode->vsync_end, mode->vtotal,
75 mode->type, mode->flags);
76
77 ctrl_pol = 0;
78 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
79 ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
80 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
81 ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
82 /* probably need to get DATA_EN polarity from panel.. */
83
84 dsi_hsync_skew = 0; /* get this from panel? */
85
86 hsync_start_x = (mode->htotal - mode->hsync_start);
87 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
88
89 vsync_period = mode->vtotal * mode->htotal;
90 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
91 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
92 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
93
94 mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
95 MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
96 MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
97 mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
98 mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
99 mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
100 MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
101 MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
102 mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
103 mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
104
105 mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
106 mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
107 MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
108 MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
109 mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
110 MDP4_DSI_ACTIVE_HCTL_START(0) |
111 MDP4_DSI_ACTIVE_HCTL_END(0));
112 mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
113 mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
114 mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
115 mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
116}
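/*
 * Illustration with the standard 640x480@60 DMT timings (htotal 800,
 * vtotal 525, vsync_start 490, vsync_end 492): vsync_period =
 * 525 * 800 = 420000, vsync_len = 2 * 800 = 1600, and display_v_start =
 * (525 - 490) * 800 = 28000, i.e. the vertical values above are counted
 * in pixel-clock ticks from the start of vsync.
 */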
117
118static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
119{
120 struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
121 struct mdp4_kms *mdp4_kms = get_kms(encoder);
122
123 if (!mdp4_dsi_encoder->enabled)
124 return;
125
126 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
127
128 /*
129 * Wait for a vsync so we know the ENABLE=0 latched before
 130	 * the (connector) source of the vsyncs is disabled;
 131	 * otherwise we end up in a funny state if we re-enable
 132	 * before the disable latches, and some of the setting
 133	 * changes for the new modeset (like the new scanout
 134	 * buffer) don't latch properly.
135 */
136 mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
137
138 mdp4_dsi_encoder->enabled = false;
139}
140
141static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
142{
143 struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
144 struct mdp4_kms *mdp4_kms = get_kms(encoder);
145
146 if (mdp4_dsi_encoder->enabled)
147 return;
148
149 mdp4_crtc_set_config(encoder->crtc,
150 MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
151 MDP4_DMA_CONFIG_DEFLKR_EN |
152 MDP4_DMA_CONFIG_DITHER_EN |
153 MDP4_DMA_CONFIG_R_BPC(BPC8) |
154 MDP4_DMA_CONFIG_G_BPC(BPC8) |
155 MDP4_DMA_CONFIG_B_BPC(BPC8) |
156 MDP4_DMA_CONFIG_PACK(0x21));
157
158 mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
159
160 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
161
162 mdp4_dsi_encoder->enabled = true;
163}
164
165static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
166 .mode_fixup = mdp4_dsi_encoder_mode_fixup,
167 .mode_set = mdp4_dsi_encoder_mode_set,
168 .disable = mdp4_dsi_encoder_disable,
169 .enable = mdp4_dsi_encoder_enable,
170};
171
172/* initialize encoder */
173struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
174{
175 struct drm_encoder *encoder = NULL;
176 struct mdp4_dsi_encoder *mdp4_dsi_encoder;
177 int ret;
178
179 mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
180 if (!mdp4_dsi_encoder) {
181 ret = -ENOMEM;
182 goto fail;
183 }
184
185 encoder = &mdp4_dsi_encoder->base;
186
187 drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
188 DRM_MODE_ENCODER_DSI, NULL);
189 drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
190
191 return encoder;
192
193fail:
194 if (encoder)
195 mdp4_dsi_encoder_destroy(encoder);
196
197 return ERR_PTR(ret);
198}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 89614c6a6c1b..a21df54cb50f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -262,7 +262,7 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
262 encoder = &mdp4_dtv_encoder->base; 262 encoder = &mdp4_dtv_encoder->base;
263 263
264 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, 264 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
265 DRM_MODE_ENCODER_TMDS); 265 DRM_MODE_ENCODER_TMDS, NULL);
266 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); 266 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
267 267
268 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk"); 268 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 5ed38cf548a1..a521207db8a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -29,7 +29,7 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
29 29
30static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 30static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
31{ 31{
32 DRM_ERROR("errors: %08x\n", irqstatus); 32 DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
33} 33}
34 34
35void mdp4_irq_preinstall(struct msm_kms *kms) 35void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 077f7521a971..5a8e3d6bcbff 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -169,7 +169,14 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
169 struct drm_encoder *encoder) 169 struct drm_encoder *encoder)
170{ 170{
171 /* if we had >1 encoder, we'd need something more clever: */ 171 /* if we had >1 encoder, we'd need something more clever: */
172 return mdp4_dtv_round_pixclk(encoder, rate); 172 switch (encoder->encoder_type) {
173 case DRM_MODE_ENCODER_TMDS:
174 return mdp4_dtv_round_pixclk(encoder, rate);
175 case DRM_MODE_ENCODER_LVDS:
176 case DRM_MODE_ENCODER_DSI:
177 default:
178 return rate;
179 }
173} 180}
174 181
175static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) 182static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
@@ -240,19 +247,18 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
240 return 0; 247 return 0;
241} 248}
242 249
243#ifdef CONFIG_OF 250static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
244static struct drm_panel *detect_panel(struct drm_device *dev)
245{ 251{
246 struct device_node *endpoint, *panel_node; 252 struct device_node *endpoint, *panel_node;
247 struct device_node *np = dev->dev->of_node; 253 struct device_node *np = dev->dev->of_node;
248 struct drm_panel *panel = NULL;
249 254
250 endpoint = of_graph_get_next_endpoint(np, NULL); 255 endpoint = of_graph_get_next_endpoint(np, NULL);
251 if (!endpoint) { 256 if (!endpoint) {
252 dev_err(dev->dev, "no valid endpoint\n"); 257 DBG("no endpoint in MDP4 to fetch LVDS panel\n");
253 return ERR_PTR(-ENODEV); 258 return NULL;
254 } 259 }
255 260
261 /* don't proceed if we have an endpoint but no panel_node tied to it */
256 panel_node = of_graph_get_remote_port_parent(endpoint); 262 panel_node = of_graph_get_remote_port_parent(endpoint);
257 if (!panel_node) { 263 if (!panel_node) {
258 dev_err(dev->dev, "no valid panel node\n"); 264 dev_err(dev->dev, "no valid panel node\n");
@@ -262,132 +268,185 @@ static struct drm_panel *detect_panel(struct drm_device *dev)
262 268
263 of_node_put(endpoint); 269 of_node_put(endpoint);
264 270
265 panel = of_drm_find_panel(panel_node); 271 return panel_node;
266 if (!panel) {
267 of_node_put(panel_node);
268 return ERR_PTR(-EPROBE_DEFER);
269 }
270
271 return panel;
272} 272}
273#else
274static struct drm_panel *detect_panel(struct drm_device *dev)
275{
276 // ??? maybe use a module param to specify which panel is attached?
277}
278#endif
279 273
280static int modeset_init(struct mdp4_kms *mdp4_kms) 274static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
275 int intf_type)
281{ 276{
282 struct drm_device *dev = mdp4_kms->dev; 277 struct drm_device *dev = mdp4_kms->dev;
283 struct msm_drm_private *priv = dev->dev_private; 278 struct msm_drm_private *priv = dev->dev_private;
284 struct drm_plane *plane;
285 struct drm_crtc *crtc;
286 struct drm_encoder *encoder; 279 struct drm_encoder *encoder;
287 struct drm_connector *connector; 280 struct drm_connector *connector;
288 struct drm_panel *panel; 281 struct device_node *panel_node;
282 struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
283 int i, dsi_id;
289 int ret; 284 int ret;
290 285
291 /* construct non-private planes: */ 286 switch (intf_type) {
292 plane = mdp4_plane_init(dev, VG1, false); 287 case DRM_MODE_ENCODER_LVDS:
293 if (IS_ERR(plane)) { 288 /*
294 dev_err(dev->dev, "failed to construct plane for VG1\n"); 289 * bail out early if:
295 ret = PTR_ERR(plane); 290 * - there is no panel node (no need to initialize lcdc
296 goto fail; 291 * encoder and lvds connector), or
297 } 292 * - panel node is a bad pointer
298 priv->planes[priv->num_planes++] = plane; 293 */
294 panel_node = mdp4_detect_lcdc_panel(dev);
295 if (IS_ERR_OR_NULL(panel_node))
296 return PTR_ERR(panel_node);
297
298 encoder = mdp4_lcdc_encoder_init(dev, panel_node);
299 if (IS_ERR(encoder)) {
300 dev_err(dev->dev, "failed to construct LCDC encoder\n");
301 return PTR_ERR(encoder);
302 }
299 303
300 plane = mdp4_plane_init(dev, VG2, false); 304 /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
301 if (IS_ERR(plane)) { 305 encoder->possible_crtcs = 1 << DMA_P;
302 dev_err(dev->dev, "failed to construct plane for VG2\n");
303 ret = PTR_ERR(plane);
304 goto fail;
305 }
306 priv->planes[priv->num_planes++] = plane;
307 306
308 /* 307 connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
309 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS: 308 if (IS_ERR(connector)) {
310 */ 309 dev_err(dev->dev, "failed to initialize LVDS connector\n");
310 return PTR_ERR(connector);
311 }
311 312
312 panel = detect_panel(dev); 313 priv->encoders[priv->num_encoders++] = encoder;
313 if (IS_ERR(panel)) { 314 priv->connectors[priv->num_connectors++] = connector;
314 ret = PTR_ERR(panel);
315 dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
316 goto fail;
317 }
318 315
319 plane = mdp4_plane_init(dev, RGB2, true); 316 break;
320 if (IS_ERR(plane)) { 317 case DRM_MODE_ENCODER_TMDS:
321 dev_err(dev->dev, "failed to construct plane for RGB2\n"); 318 encoder = mdp4_dtv_encoder_init(dev);
322 ret = PTR_ERR(plane); 319 if (IS_ERR(encoder)) {
323 goto fail; 320 dev_err(dev->dev, "failed to construct DTV encoder\n");
324 } 321 return PTR_ERR(encoder);
322 }
325 323
326 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P); 324 /* DTV can be hooked to DMA_E: */
327 if (IS_ERR(crtc)) { 325 encoder->possible_crtcs = 1 << 1;
328 dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
329 ret = PTR_ERR(crtc);
330 goto fail;
331 }
332 326
333 encoder = mdp4_lcdc_encoder_init(dev, panel); 327 if (priv->hdmi) {
334 if (IS_ERR(encoder)) { 328 /* Construct bridge/connector for HDMI: */
335 dev_err(dev->dev, "failed to construct LCDC encoder\n"); 329 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
336 ret = PTR_ERR(encoder); 330 if (ret) {
337 goto fail; 331 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
338 } 332 return ret;
333 }
334 }
339 335
340 /* LCDC can be hooked to DMA_P: */ 336 priv->encoders[priv->num_encoders++] = encoder;
341 encoder->possible_crtcs = 1 << priv->num_crtcs;
342 337
343 priv->crtcs[priv->num_crtcs++] = crtc; 338 break;
344 priv->encoders[priv->num_encoders++] = encoder; 339 case DRM_MODE_ENCODER_DSI:
340 /* only DSI1 supported for now */
341 dsi_id = 0;
345 342
346 connector = mdp4_lvds_connector_init(dev, panel, encoder); 343 if (!priv->dsi[dsi_id])
347 if (IS_ERR(connector)) { 344 break;
348 ret = PTR_ERR(connector);
349 dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
350 goto fail;
351 }
352 345
353 priv->connectors[priv->num_connectors++] = connector; 346 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
347 dsi_encs[i] = mdp4_dsi_encoder_init(dev);
348 if (IS_ERR(dsi_encs[i])) {
349 ret = PTR_ERR(dsi_encs[i]);
350 dev_err(dev->dev,
351 "failed to construct DSI encoder: %d\n",
352 ret);
353 return ret;
354 }
354 355
355 /* 356 /* TODO: Add DMA_S later? */
356 * Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI: 357 dsi_encs[i]->possible_crtcs = 1 << DMA_P;
357 */ 358 priv->encoders[priv->num_encoders++] = dsi_encs[i];
359 }
358 360
359 plane = mdp4_plane_init(dev, RGB1, true); 361 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
360 if (IS_ERR(plane)) { 362 if (ret) {
361 dev_err(dev->dev, "failed to construct plane for RGB1\n"); 363 dev_err(dev->dev, "failed to initialize DSI: %d\n",
362 ret = PTR_ERR(plane); 364 ret);
363 goto fail; 365 return ret;
364 } 366 }
365 367
366 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E); 368 break;
367 if (IS_ERR(crtc)) { 369 default:
368 dev_err(dev->dev, "failed to construct crtc for DMA_E\n"); 370 dev_err(dev->dev, "Invalid or unsupported interface\n");
369 ret = PTR_ERR(crtc); 371 return -EINVAL;
370 goto fail;
371 } 372 }
372 373
373 encoder = mdp4_dtv_encoder_init(dev); 374 return 0;
374 if (IS_ERR(encoder)) { 375}
375 dev_err(dev->dev, "failed to construct DTV encoder\n"); 376
376 ret = PTR_ERR(encoder); 377static int modeset_init(struct mdp4_kms *mdp4_kms)
377 goto fail; 378{
379 struct drm_device *dev = mdp4_kms->dev;
380 struct msm_drm_private *priv = dev->dev_private;
381 struct drm_plane *plane;
382 struct drm_crtc *crtc;
383 int i, ret;
384 static const enum mdp4_pipe rgb_planes[] = {
385 RGB1, RGB2,
386 };
387 static const enum mdp4_pipe vg_planes[] = {
388 VG1, VG2,
389 };
390 static const enum mdp4_dma mdp4_crtcs[] = {
391 DMA_P, DMA_E,
392 };
393 static const char * const mdp4_crtc_names[] = {
394 "DMA_P", "DMA_E",
395 };
396 static const int mdp4_intfs[] = {
397 DRM_MODE_ENCODER_LVDS,
398 DRM_MODE_ENCODER_DSI,
399 DRM_MODE_ENCODER_TMDS,
400 };
401
402 /* construct non-private planes: */
403 for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
404 plane = mdp4_plane_init(dev, vg_planes[i], false);
405 if (IS_ERR(plane)) {
406 dev_err(dev->dev,
407 "failed to construct plane for VG%d\n", i + 1);
408 ret = PTR_ERR(plane);
409 goto fail;
410 }
411 priv->planes[priv->num_planes++] = plane;
378 } 412 }
379 413
380 /* DTV can be hooked to DMA_E: */ 414 for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
381 encoder->possible_crtcs = 1 << priv->num_crtcs; 415 plane = mdp4_plane_init(dev, rgb_planes[i], true);
416 if (IS_ERR(plane)) {
417 dev_err(dev->dev,
418 "failed to construct plane for RGB%d\n", i + 1);
419 ret = PTR_ERR(plane);
420 goto fail;
421 }
422
423 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
424 mdp4_crtcs[i]);
425 if (IS_ERR(crtc)) {
426 dev_err(dev->dev, "failed to construct crtc for %s\n",
427 mdp4_crtc_names[i]);
428 ret = PTR_ERR(crtc);
429 goto fail;
430 }
431
432 priv->crtcs[priv->num_crtcs++] = crtc;
433 }
382 434
383 priv->crtcs[priv->num_crtcs++] = crtc; 435 /*
384 priv->encoders[priv->num_encoders++] = encoder; 436 * we currently set up two relatively fixed paths:
437 *
438 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
439 * or
440 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
441 *
442 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
443 */
385 444
386 if (priv->hdmi) { 445 for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
387 /* Construct bridge/connector for HDMI: */ 446 ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
388 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
389 if (ret) { 447 if (ret) {
390 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); 448 dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
449 i, ret);
391 goto fail; 450 goto fail;
392 } 451 }
393 } 452 }
@@ -558,17 +617,10 @@ fail:
558static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) 617static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
559{ 618{
560 static struct mdp4_platform_config config = {}; 619 static struct mdp4_platform_config config = {};
561#ifdef CONFIG_OF 620
 562	/* TODO */ 621	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
563 config.max_clk = 266667000; 622 config.max_clk = 266667000;
564 config.iommu = iommu_domain_alloc(&platform_bus_type); 623 config.iommu = iommu_domain_alloc(&platform_bus_type);
565#else 624
566 if (cpu_is_apq8064())
567 config.max_clk = 266667000;
568 else
569 config.max_clk = 200000000;
570
571 config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
572#endif
573 return &config; 625 return &config;
574} 626}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..d2c96ef431f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -157,7 +157,7 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
157 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); 157 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
158 break; 158 break;
159 default: 159 default:
160 WARN_ON("invalid pipe"); 160 WARN(1, "invalid pipe");
161 break; 161 break;
162 } 162 }
163 163
@@ -212,10 +212,19 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
 
 long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
 struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
-		struct drm_panel *panel);
+		struct device_node *panel_node);
 
 struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
-		struct drm_panel *panel, struct drm_encoder *encoder);
+		struct device_node *panel_node, struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
+#else
+static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
 
 #ifdef CONFIG_COMMON_CLK
 struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
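The inline stub above means callers need no #ifdefs of their own: when DSI support is compiled out, mdp4_dsi_encoder_init() still links and simply reports -ENODEV through ERR_PTR(). A hedged caller sketch (the surrounding function is illustrative, not from the commit):

	struct drm_encoder *encoder = mdp4_dsi_encoder_init(dev);

	if (IS_ERR(encoder)) {
		int ret = PTR_ERR(encoder);

		if (ret == -ENODEV)
			return 0;	/* DSI not built in; skip this intf */
		return ret;		/* real probe failure */
	}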
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index 4cd6e721aa0a..cd63fedb67cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -23,6 +23,7 @@
 
 struct mdp4_lcdc_encoder {
 	struct drm_encoder base;
+	struct device_node *panel_node;
 	struct drm_panel *panel;
 	struct clk *lcdc_clk;
 	unsigned long int pixclock;
@@ -338,7 +339,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
 	struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
 			to_mdp4_lcdc_encoder(encoder);
 	struct mdp4_kms *mdp4_kms = get_kms(encoder);
-	struct drm_panel *panel = mdp4_lcdc_encoder->panel;
+	struct drm_panel *panel;
 	int i, ret;
 
 	if (WARN_ON(!mdp4_lcdc_encoder->enabled))
@@ -346,6 +347,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
 
 	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 
+	panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
 	if (panel) {
 		drm_panel_disable(panel);
 		drm_panel_unprepare(panel);
@@ -381,7 +383,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 			to_mdp4_lcdc_encoder(encoder);
 	unsigned long pc = mdp4_lcdc_encoder->pixclock;
 	struct mdp4_kms *mdp4_kms = get_kms(encoder);
-	struct drm_panel *panel = mdp4_lcdc_encoder->panel;
+	struct drm_panel *panel;
 	int i, ret;
 
 	if (WARN_ON(mdp4_lcdc_encoder->enabled))
@@ -414,6 +416,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 	if (ret)
 		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
+	panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
 	if (panel) {
 		drm_panel_prepare(panel);
 		drm_panel_enable(panel);
@@ -442,7 +445,7 @@ long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
 
 /* initialize encoder */
 struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
-		struct drm_panel *panel)
+		struct device_node *panel_node)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -455,12 +458,12 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 		goto fail;
 	}
 
-	mdp4_lcdc_encoder->panel = panel;
+	mdp4_lcdc_encoder->panel_node = panel_node;
 
 	encoder = &mdp4_lcdc_encoder->base;
 
 	drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
-			DRM_MODE_ENCODER_LVDS);
+			DRM_MODE_ENCODER_LVDS, NULL);
 	drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
 
 	/* TODO: do we need different pll in other cases? */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 921185133d38..e73e1742b250 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -23,6 +23,7 @@
 struct mdp4_lvds_connector {
 	struct drm_connector base;
 	struct drm_encoder *encoder;
+	struct device_node *panel_node;
 	struct drm_panel *panel;
 };
 #define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
@@ -33,6 +34,10 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
 	struct mdp4_lvds_connector *mdp4_lvds_connector =
 			to_mdp4_lvds_connector(connector);
 
+	if (!mdp4_lvds_connector->panel)
+		mdp4_lvds_connector->panel =
+			of_drm_find_panel(mdp4_lvds_connector->panel_node);
+
 	return mdp4_lvds_connector->panel ?
 			connector_status_connected :
 			connector_status_disconnected;
@@ -42,10 +47,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
 {
 	struct mdp4_lvds_connector *mdp4_lvds_connector =
 			to_mdp4_lvds_connector(connector);
-	struct drm_panel *panel = mdp4_lvds_connector->panel;
-
-	if (panel)
-		drm_panel_detach(panel);
 
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
@@ -60,9 +61,14 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
 	struct drm_panel *panel = mdp4_lvds_connector->panel;
 	int ret = 0;
 
-	if (panel)
+	if (panel) {
+		drm_panel_attach(panel, connector);
+
 		ret = panel->funcs->get_modes(panel);
 
+		drm_panel_detach(panel);
+	}
+
 	return ret;
 }
 
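Storing only the device_node lets the connector resolve the panel lazily — at detect/get_modes time — instead of requiring the panel driver to have probed before the connector is created, and the attach/detach bracket keeps the panel unpinned between calls. A hedged sketch of the general pattern (DRM API names are real; the wrapper function is illustrative, and of_drm_find_panel() returned NULL on failure in this kernel generation):

	static int example_get_modes(struct drm_connector *connector,
				     struct device_node *panel_node)
	{
		struct drm_panel *panel = of_drm_find_panel(panel_node);
		int ret = 0;

		if (!panel)
			return 0;	/* panel driver not probed yet */

		drm_panel_attach(panel, connector);	/* bind only for the call */
		ret = panel->funcs->get_modes(panel);
		drm_panel_detach(panel);		/* don't pin the panel */

		return ret;
	}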
@@ -111,7 +117,7 @@ static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs
 
 /* initialize connector */
 struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
-		struct drm_panel *panel, struct drm_encoder *encoder)
+		struct device_node *panel_node, struct drm_encoder *encoder)
 {
 	struct drm_connector *connector = NULL;
 	struct mdp4_lvds_connector *mdp4_lvds_connector;
@@ -124,7 +130,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
 	}
 
 	mdp4_lvds_connector->encoder = encoder;
-	mdp4_lvds_connector->panel = panel;
+	mdp4_lvds_connector->panel_node = panel_node;
 
 	connector = &mdp4_lvds_connector->base;
 
@@ -141,9 +147,6 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
 
 	drm_mode_connector_attach_encoder(connector, encoder);
 
-	if (panel)
-		drm_panel_attach(panel, connector);
-
 	return connector;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..9f96dfe67769 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -397,7 +397,8 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
-			mdp4_plane->formats, mdp4_plane->nformats, type);
+			mdp4_plane->formats, mdp4_plane->nformats,
+			type, NULL);
 	if (ret)
 		goto fail;
 
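The trailing NULL threaded through drm_universal_plane_init(), drm_crtc_init_with_planes() and drm_encoder_init() across this series corresponds to the printf-style object-name parameter these initializers gained in this kernel cycle; passing NULL keeps the core-generated name. A hedged sketch of supplying an explicit name instead (the format string and idx are illustrative):

	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
			mdp4_plane->formats, mdp4_plane->nformats,
			type, "rgb%d", idx);	/* idx is a hypothetical index */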
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index bb1225aa2f75..57f73f0c120d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -553,9 +553,7 @@ fail:
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
 {
 	static struct mdp5_cfg_platform config = {};
-#ifdef CONFIG_OF
-	/* TODO */
-#endif
+
 	config.iommu = iommu_domain_alloc(&platform_bus_type);
 
 	return &config;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 8e6c9b598a57..1aa21dba663d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -326,7 +326,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
 	mdp5_cmd_enc->ctl = ctl;
 
 	drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
-			DRM_MODE_ENCODER_DSI);
+			DRM_MODE_ENCODER_DSI, NULL);
 
 	drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..20cee5ce4071 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -797,7 +797,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
 			pipe2name(mdp5_plane_pipe(plane)), id);
 
-	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
+	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
+				  NULL);
 
 	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
 			"unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index c9e32b08a7a0..0d737cad03a6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -293,6 +293,24 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
 	.enable = mdp5_encoder_enable,
 };
 
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+	int intf = mdp5_encoder->intf.num;
+
+	return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
+}
+
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+	int intf = mdp5_encoder->intf.num;
+
+	return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
+}
+
 int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 		struct drm_encoder *slave_encoder)
 {
@@ -354,7 +372,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 
 	spin_lock_init(&mdp5_encoder->intf_lock);
 
-	drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type);
+	drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
 
 	drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index b0d4b53b97f4..73bc3e312fd4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -31,7 +31,7 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
 
 static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
 {
-	DRM_ERROR("errors: %08x\n", irqstatus);
+	DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
 }
 
 void mdp5_irq_preinstall(struct msm_kms *kms)
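Error interrupts can fire on every frame when a pipe is misconfigured, so the rate-limited variant keeps a stuck error from flooding dmesg. A hedged sketch of the underlying mechanism (the ratelimit API is real; the wrapper is illustrative): a ratelimit state allows a burst of messages per interval — 10 per 5 seconds by default — then suppresses the rest.

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(err_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	static void report_error(u32 status)
	{
		if (__ratelimit(&err_rs))
			pr_err("errors: %08x\n", status);	/* throttled */
	}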
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index b532faa8026d..e115318402bd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -468,6 +468,127 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 	return 0;
 }
 
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	drm_for_each_encoder(encoder, dev)
+		if (encoder->crtc == crtc)
+			return encoder;
+
+	return NULL;
+}
+
+static int mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			       unsigned int flags, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime,
+			       const struct drm_display_mode *mode)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+	int ret = 0;
+
+	crtc = priv->crtcs[pipe];
+	if (!crtc) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return 0;
+	}
+
+	encoder = get_encoder_from_crtc(crtc);
+	if (!encoder) {
+		DRM_ERROR("no encoder found for crtc %d\n", pipe);
+		return 0;
+	}
+
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+	/*
+	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+	 * the end of VFP. Translate the porch values relative to the line
+	 * counter positions.
+	 */
+
+	vactive_start = vsw + vbp + 1;
+
+	vactive_end = vactive_start + mode->crtc_vdisplay;
+
+	/* last scan line before VSYNC */
+	vfp_end = mode->crtc_vtotal;
+
+	if (stime)
+		*stime = ktime_get();
+
+	line = mdp5_encoder_get_linecount(encoder);
+
+	if (line < vactive_start) {
+		line -= vactive_start;
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+	} else if (line > vactive_end) {
+		line = line - vfp_end - vactive_start;
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+	} else {
+		line -= vactive_start;
+	}
+
+	*vpos = line;
+	*hpos = 0;
+
+	if (etime)
+		*etime = ktime_get();
+
+	return ret;
+}
+
+static int mdp5_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	if (pipe < 0 || pipe >= priv->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	crtc = priv->crtcs[pipe];
+	if (!crtc) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     &crtc->mode);
+}
+
+static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+
+	if (pipe < 0 || pipe >= priv->num_crtcs)
+		return 0;
+
+	crtc = priv->crtcs[pipe];
+	if (!crtc)
+		return 0;
+
+	encoder = get_encoder_from_crtc(crtc);
+	if (!encoder)
+		return 0;
+
+	return mdp5_encoder_get_framecount(encoder);
+}
+
 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = dev->platformdev;
@@ -590,6 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 				!config->hw->intf.base[i])
 			continue;
 		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+
+		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
 	}
 	mdp5_disable(mdp5_kms);
 	mdelay(16);
@@ -635,6 +758,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	dev->mode_config.max_width = config->hw->lm.max_width;
 	dev->mode_config.max_height = config->hw->lm.max_height;
 
+	dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
+	dev->max_vblank_count = 0xffffffff;
+	dev->vblank_disable_immediate = true;
+
 	return kms;
 
 fail:
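To make the line-counter translation in mdp5_get_scanoutpos() concrete, here is a worked example using assumed 1080p timings (vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125 — illustrative numbers, not from the commit):

	/*
	 *   vsw           = 1089 - 1084 = 5
	 *   vbp           = 1125 - 1089 = 36
	 *   vactive_start = 5 + 36 + 1  = 42    (counter value of first active line)
	 *   vactive_end   = 42 + 1080   = 1122  (counter value past last active line)
	 *   vfp_end       = 1125                (counter value at end of front porch)
	 *
	 * counter 600  -> vpos = 600 - 42         = 558  (in active video)
	 * counter 10   -> vpos = 10 - 42          = -32, IN_VBLANK (vsync/back porch)
	 * counter 1124 -> vpos = 1124 - 1125 - 42 = -43, IN_VBLANK (front porch)
	 */

Negative vpos values tell the DRM core how far the raster is from the start of active video, which is what drm_calc_vbltimestamp_from_scanoutpos() needs to compute precise vblank timestamps.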
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..00730ba08a60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -222,6 +222,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 		struct drm_encoder *slave_encoder);
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
 
 #ifdef CONFIG_DRM_MSM_DSI
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..432c09836b0e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -904,7 +904,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
 			mdp5_plane->formats, mdp5_plane->nformats,
-			type);
+			type, NULL);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b88ce514eb8e..9a30807b900b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -237,20 +237,9 @@ static int msm_unload(struct drm_device *dev)
 
 static int get_mdp_ver(struct platform_device *pdev)
 {
-#ifdef CONFIG_OF
-	static const struct of_device_id match_types[] = { {
-		.compatible = "qcom,mdss_mdp",
-		.data	= (void	*)5,
-	}, {
-		/* end node */
-	} };
 	struct device *dev = &pdev->dev;
-	const struct of_device_id *match;
-	match = of_match_node(match_types, dev->of_node);
-	if (match)
-		return (int)(unsigned long)match->data;
-#endif
-	return 4;
+
+	return (int) (unsigned long) of_device_get_match_data(dev);
 }
 
 #include <linux/of_address.h>
@@ -258,10 +247,10 @@ static int get_mdp_ver(struct platform_device *pdev)
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct device_node *node;
 	unsigned long size = 0;
 	int ret = 0;
 
-#ifdef CONFIG_OF
 	/* In the device-tree world, we could have a 'memory-region'
 	 * phandle, which gives us a link to our "vram".  Allocating
 	 * is all nicely abstracted behind the dma api, but we need
@@ -278,7 +267,6 @@ static int msm_init_vram(struct drm_device *dev)
 	 * as corruption on screen before we have a chance to
 	 * load and do initial modeset)
 	 */
-	struct device_node *node;
 
 	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
 	if (node) {
@@ -288,14 +276,12 @@ static int msm_init_vram(struct drm_device *dev)
 			return ret;
 		size = r.end - r.start;
 		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
-	} else
-#endif
 
 	/* if we have no IOMMU, then we need to use carveout allocator.
 	 * Grab the entire CMA chunk carved out in early startup in
 	 * mach-msm:
 	 */
-	if (!iommu_present(&platform_bus_type)) {
+	} else if (!iommu_present(&platform_bus_type)) {
 		DRM_INFO("using %s VRAM carveout\n", vram);
 		size = memparse(vram, NULL);
 	}
@@ -1035,9 +1021,9 @@ static const struct dev_pm_ops msm_pm_ops = {
  * Componentized driver support:
  */
 
-#ifdef CONFIG_OF
-/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
- * (or probably any other)..  so probably some room for some helpers
+/*
+ * NOTE: duplication of the same code as exynos or imx (or probably any other).
+ * so probably some room for some helpers
  */
 static int compare_of(struct device *dev, void *data)
 {
@@ -1062,12 +1048,6 @@ static int add_components(struct device *dev, struct component_match **matchptr,
 
 	return 0;
 }
-#else
-static int compare_dev(struct device *dev, void *data)
-{
-	return dev == data;
-}
-#endif
 
 static int msm_drm_bind(struct device *dev)
 {
@@ -1091,35 +1071,9 @@ static const struct component_master_ops msm_drm_ops = {
 static int msm_pdev_probe(struct platform_device *pdev)
 {
 	struct component_match *match = NULL;
-#ifdef CONFIG_OF
+
 	add_components(&pdev->dev, &match, "connectors");
 	add_components(&pdev->dev, &match, "gpus");
-#else
-	/* For non-DT case, it kinda sucks.  We don't actually have a way
-	 * to know whether or not we are waiting for certain devices (or if
-	 * they are simply not present).  But for non-DT we only need to
-	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
-	 */
-	static const char *devnames[] = {
-			"hdmi_msm.0", "kgsl-3d0.0",
-	};
-	int i;
-
-	DBG("Adding components..");
-
-	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
-		struct device *dev;
-
-		dev = bus_find_device_by_name(&platform_bus_type,
-				NULL, devnames[i]);
-		if (!dev) {
-			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
-			return -EPROBE_DEFER;
-		}
-
-		component_match_add(&pdev->dev, &match, compare_dev, dev);
-	}
-#endif
 
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -1138,8 +1092,10 @@ static const struct platform_device_id msm_id[] = {
 };
 
 static const struct of_device_id dt_match[] = {
-	{ .compatible = "qcom,mdp" },      /* mdp4 */
-	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
+	{ .compatible = "qcom,mdp4", .data = (void *) 4 },	/* mdp4 */
+	{ .compatible = "qcom,mdp5", .data = (void *) 5 },	/* mdp5 */
+	/* to support downstream DT files */
+	{ .compatible = "qcom,mdss_mdp", .data = (void *) 5 },	/* mdp5 */
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
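With get_mdp_ver() reduced to of_device_get_match_data(), the MDP generation is carried entirely by the pointer-sized .data cookie in dt_match above: the integer is cast into the pointer at table-definition time and cast back out on probe, never dereferenced. A hedged sketch of the round trip (probe function name is illustrative):

	static int example_probe(struct platform_device *pdev)
	{
		int ver = (int) (unsigned long) of_device_get_match_data(&pdev->dev);

		/* "qcom,mdp4" -> 4, "qcom,mdp5"/"qcom,mdss_mdp" -> 5,
		 * 0 if probed without a DT match */
		return ver ? 0 : -ENODEV;
	}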
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3be7a56b14f1..c1e7bba2fdb7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -31,14 +31,9 @@
 #include <linux/iommu.h>
 #include <linux/types.h>
 #include <linux/of_graph.h>
+#include <linux/of_device.h>
 #include <asm/sizes.h>
 
-#ifndef CONFIG_OF
-#include <mach/board.h>
-#include <mach/socinfo.h>
-#include <mach/iommu_domains.h>
-#endif
-
 #include <drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -240,9 +235,9 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
-		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 121713281417..a474d6cf5d9f 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -138,7 +138,7 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
 }
 
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
-		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *bos[4] = {0};
 	struct drm_framebuffer *fb;
@@ -168,7 +168,7 @@ out_unref:
 }
 
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..d95af6eba602 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -121,7 +121,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 		/* note: if fb creation failed, we can't rely on fb destroy
 		 * to unref the bo:
 		 */
-		drm_gem_object_unreference(fbdev->bo);
+		drm_gem_object_unreference_unlocked(fbdev->bo);
 		ret = PTR_ERR(fb);
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 3d96b49fe662..6f04397d43a7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1081,8 +1081,6 @@ nouveau_crtc_set_config(struct drm_mode_set *set)
 }
 
 static const struct drm_crtc_funcs nv04_crtc_funcs = {
-	.save = nv_crtc_save,
-	.restore = nv_crtc_restore,
 	.cursor_set = nv04_crtc_cursor_set,
 	.cursor_move = nv04_crtc_cursor_move,
 	.gamma_set = nv_crtc_gamma_set,
@@ -1123,6 +1121,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	nv_crtc->index = crtc_num;
 	nv_crtc->last_dpms = NV_DPMS_CLEARED;
 
+	nv_crtc->save = nv_crtc_save;
+	nv_crtc->restore = nv_crtc_restore;
+
 	drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
 	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 78cb033bc015..b48eec395f07 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -504,8 +504,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
 
 static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
 	.dpms = nv04_dac_dpms,
-	.save = nv04_dac_save,
-	.restore = nv04_dac_restore,
 	.mode_fixup = nv04_dac_mode_fixup,
 	.prepare = nv04_dac_prepare,
 	.commit = nv04_dac_commit,
@@ -515,8 +513,6 @@ static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
 
 static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
 	.dpms = nv04_dac_dpms,
-	.save = nv04_dac_save,
-	.restore = nv04_dac_restore,
 	.mode_fixup = nv04_dac_mode_fixup,
 	.prepare = nv04_dac_prepare,
 	.commit = nv04_dac_commit,
@@ -545,12 +541,16 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
 	nv_encoder->dcb = entry;
 	nv_encoder->or = ffs(entry->or) - 1;
 
+	nv_encoder->enc_save = nv04_dac_save;
+	nv_encoder->enc_restore = nv04_dac_restore;
+
 	if (nv_gf4_disp_arch(dev))
 		helper = &nv17_dac_helper_funcs;
 	else
 		helper = &nv04_dac_helper_funcs;
 
-	drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
+	drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC,
+			 NULL);
 	drm_encoder_helper_add(encoder, helper);
 
 	encoder->possible_crtcs = entry->heads;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 429ab5e3025a..05bfd151d1d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -652,8 +652,6 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
 	.dpms = nv04_lvds_dpms,
-	.save = nv04_dfp_save,
-	.restore = nv04_dfp_restore,
 	.mode_fixup = nv04_dfp_mode_fixup,
 	.prepare = nv04_dfp_prepare,
 	.commit = nv04_dfp_commit,
@@ -663,8 +661,6 @@ static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
 
 static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
 	.dpms = nv04_tmds_dpms,
-	.save = nv04_dfp_save,
-	.restore = nv04_dfp_restore,
 	.mode_fixup = nv04_dfp_mode_fixup,
 	.prepare = nv04_dfp_prepare,
 	.commit = nv04_dfp_commit,
@@ -701,12 +697,15 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
 	if (!nv_encoder)
 		return -ENOMEM;
 
+	nv_encoder->enc_save = nv04_dfp_save;
+	nv_encoder->enc_restore = nv04_dfp_restore;
+
 	encoder = to_drm_encoder(nv_encoder);
 
 	nv_encoder->dcb = entry;
 	nv_encoder->or = ffs(entry->or) - 1;
 
-	drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type);
+	drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type, NULL);
 	drm_encoder_helper_add(encoder, helper);
 
 	encoder->possible_crtcs = entry->heads;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 9e650081c357..b4a6bc433ef5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -39,7 +39,8 @@ nv04_display_create(struct drm_device *dev)
 	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *ct;
 	struct drm_encoder *encoder;
-	struct drm_crtc *crtc;
+	struct nouveau_encoder *nv_encoder;
+	struct nouveau_crtc *crtc;
 	struct nv04_display *disp;
 	int i, ret;
 
@@ -107,14 +108,11 @@ nv04_display_create(struct drm_device *dev)
 	}
 
 	/* Save previous state */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		crtc->funcs->save(crtc);
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		const struct drm_encoder_helper_funcs *func = encoder->helper_private;
-
-		func->save(encoder);
-	}
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+		crtc->save(&crtc->base);
+
+	list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list, base.base.head)
+		nv_encoder->enc_save(&nv_encoder->base.base);
 
 	nouveau_overlay_init(dev);
 
@@ -126,8 +124,9 @@ nv04_display_destroy(struct drm_device *dev)
 {
 	struct nv04_display *disp = nv04_display(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct drm_encoder *encoder;
+	struct nouveau_encoder *encoder;
 	struct drm_crtc *crtc;
+	struct nouveau_crtc *nv_crtc;
 
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -139,14 +138,11 @@ nv04_display_destroy(struct drm_device *dev)
 	}
 
 	/* Restore state */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		const struct drm_encoder_helper_funcs *func = encoder->helper_private;
-
-		func->restore(encoder);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		crtc->funcs->restore(crtc);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
+		encoder->enc_restore(&encoder->base.base);
+
+	list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
+		nv_crtc->restore(&nv_crtc->base);
 
 	nouveau_hw_save_vga_fonts(dev, 0);
 
@@ -159,8 +155,8 @@ nv04_display_destroy(struct drm_device *dev)
 int
 nv04_display_init(struct drm_device *dev)
 {
-	struct drm_encoder *encoder;
-	struct drm_crtc *crtc;
+	struct nouveau_encoder *encoder;
+	struct nouveau_crtc *crtc;
 
 	/* meh.. modeset apparently doesn't setup all the regs and depends
 	 * on pre-existing state, for now load the state of the card *before*
@@ -170,14 +166,11 @@ nv04_display_init(struct drm_device *dev)
 	 * save/restore "pre-load" state, but more general so we can save
 	 * on suspend too.
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		const struct drm_encoder_helper_funcs *func = encoder->helper_private;
-
-		func->restore(encoder);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		crtc->funcs->restore(crtc);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+		crtc->save(&crtc->base);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
+		encoder->enc_save(&encoder->base.base);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 5345eb5378a8..54e9fb9eb5c0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -192,8 +192,6 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
 
 static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
 	.dpms = nv04_tv_dpms,
-	.save = drm_i2c_encoder_save,
-	.restore = drm_i2c_encoder_restore,
 	.mode_fixup = drm_i2c_encoder_mode_fixup,
 	.prepare = nv04_tv_prepare,
 	.commit = nv04_tv_commit,
@@ -225,9 +223,13 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 	/* Initialize the common members */
 	encoder = to_drm_encoder(nv_encoder);
 
-	drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+	drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC,
+			 NULL);
 	drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
 
+	nv_encoder->enc_save = drm_i2c_encoder_save;
+	nv_encoder->enc_restore = drm_i2c_encoder_restore;
+
 	encoder->possible_crtcs = entry->heads;
 	encoder->possible_clones = 0;
 	nv_encoder->dcb = entry;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index b734195d80a0..d9644c0c5a83 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -771,8 +771,6 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
 
 static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
 	.dpms = nv17_tv_dpms,
-	.save = nv17_tv_save,
-	.restore = nv17_tv_restore,
 	.mode_fixup = nv17_tv_mode_fixup,
 	.prepare = nv17_tv_prepare,
 	.commit = nv17_tv_commit,
@@ -816,10 +814,14 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 	tv_enc->base.dcb = entry;
 	tv_enc->base.or = ffs(entry->or) - 1;
 
-	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC,
+			 NULL);
 	drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
 	to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
 
+	tv_enc->base.enc_save = nv17_tv_save;
+	tv_enc->base.enc_restore = nv17_tv_restore;
+
 	encoder->possible_crtcs = entry->heads;
 	encoder->possible_clones = 0;
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 28bc202f9753..40f845e31272 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -7,6 +7,7 @@ struct nvkm_instmem {
 	const struct nvkm_instmem_func *func;
 	struct nvkm_subdev subdev;
 
+	spinlock_t lock;
 	struct list_head list;
 	u32 reserved;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8b8332e46f24..d5e6938cc6bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
 		return -ENODEV;
 	}
 	obj = (union acpi_object *)buffer.pointer;
+	len = min(len, (int)obj->buffer.length);
 	memcpy(bios+offset, obj->buffer.pointer, len);
 	kfree(buffer.pointer);
 	return len;
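The one-line clamp matters: if the ACPI _ROM method hands back a buffer shorter than the requested length, the old memcpy() would read past the end of that buffer. A hedged illustration of the pattern (function and names are illustrative, not from the driver):

	static int copy_rom_chunk(u8 *dst, int want,
				  const u8 *src, size_t src_len)
	{
		int len = min(want, (int)src_len);	/* clamp to what we got */

		memcpy(dst, src, len);
		return len;	/* callers must handle short reads */
	}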
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2e7cbe933533..5dd1d0111cac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -898,8 +898,6 @@ nouveau_connector_helper_funcs = {
 static const struct drm_connector_funcs
 nouveau_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = NULL,
-	.restore = NULL,
 	.detect = nouveau_connector_detect,
 	.destroy = nouveau_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -910,8 +908,6 @@ nouveau_connector_funcs = {
 static const struct drm_connector_funcs
 nouveau_connector_funcs_lvds = {
 	.dpms = drm_helper_connector_dpms,
-	.save = NULL,
-	.restore = NULL,
 	.detect = nouveau_connector_detect_lvds,
 	.destroy = nouveau_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -944,8 +940,6 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
 static const struct drm_connector_funcs
 nouveau_connector_funcs_dp = {
 	.dpms = nouveau_connector_dp_dpms,
-	.save = NULL,
-	.restore = NULL,
 	.detect = nouveau_connector_detect,
 	.destroy = nouveau_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index f19cb1c5fc5a..863f10b8d818 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -73,6 +73,9 @@ struct nouveau_crtc {
 	int (*set_dither)(struct nouveau_crtc *crtc, bool update);
 	int (*set_scale)(struct nouveau_crtc *crtc, bool update);
 	int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
+
+	void (*save)(struct drm_crtc *crtc);
+	void (*restore)(struct drm_crtc *crtc);
 };
 
 static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index db6bc6760545..18676b8c1721 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -246,7 +246,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
 int
 nouveau_framebuffer_init(struct drm_device *dev,
 			 struct nouveau_framebuffer *nv_fb,
-			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 const struct drm_mode_fb_cmd2 *mode_cmd,
 			 struct nouveau_bo *nvbo)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
@@ -272,7 +272,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 nouveau_user_framebuffer_create(struct drm_device *dev,
 				struct drm_file *file_priv,
-				struct drm_mode_fb_cmd2 *mode_cmd)
+				const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct nouveau_framebuffer *nouveau_fb;
 	struct drm_gem_object *gem;
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	struct drm_device *dev = drm->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
-	int crtcid = -1;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
-		/* Vblank timestamps/counts are only correct on >= NV-50 */
-		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-			crtcid = s->crtc;
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+			drm_arm_vblank_event(dev, s->crtc, s->event);
+		} else {
+			drm_send_vblank_event(dev, s->crtc, s->event);
 
-		drm_send_vblank_event(dev, crtcid, s->event);
+			/* Give up ownership of vblank for page-flipped crtc */
+			drm_vblank_put(dev, s->crtc);
+		}
+	}
+	else {
+		/* Give up ownership of vblank for page-flipped crtc */
+		drm_vblank_put(dev, s->crtc);
 	}
-
-	/* Give up ownership of vblank for page-flipped crtc */
-	drm_vblank_put(dev, s->crtc);
 
 	list_del(&s->head);
 	if (ps)
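The split reflects what the hardware can vouch for: drm_send_vblank_event() stamps the event with the current timestamp/sequence, which is only trustworthy on Tesla (NV50) and later, while drm_arm_vblank_event() queues the event so the DRM core completes it from the next vblank interrupt — and consumes the vblank reference the driver already holds, which is why only the send path calls drm_vblank_put() by hand. A hedged sketch of the two paths (API names are real; the wrapper is illustrative):

	static void complete_flip(struct drm_device *dev, int crtc,
				  struct drm_pending_vblank_event *e, bool hw_ok)
	{
		if (hw_ok) {
			drm_send_vblank_event(dev, crtc, e);	/* fires immediately */
			drm_vblank_put(dev, crtc);		/* release our ref */
		} else {
			drm_arm_vblank_event(dev, crtc, e);	/* fires on next vblank,
								 * takes over the ref */
		}
	}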
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 856abe0f070d..5a57d8b472c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -23,7 +23,7 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
 }
 
 int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
-			     struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+			     const struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
 
 struct nouveau_page_flip_state {
 	struct list_head head;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 3050042e6c6d..a02813e994ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -39,6 +39,7 @@
 
 #include <nvif/client.h>
 #include <nvif/device.h>
+#include <nvif/ioctl.h>
 
 #include <drmP.h>
 
@@ -65,9 +66,10 @@ struct nouveau_drm_tile {
 };
 
 enum nouveau_drm_object_route {
-	NVDRM_OBJECT_NVIF = 0,
+	NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF,
 	NVDRM_OBJECT_USIF,
 	NVDRM_OBJECT_ABI16,
+	NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY,
 };
 
 enum nouveau_drm_notify_route {
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index b37da95105b0..c38a86408363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -63,6 +63,9 @@ struct nouveau_encoder {
 		u32 datarate;
 	} dp;
 	};
+
+	void (*enc_save)(struct drm_encoder *encoder);
+	void (*enc_restore)(struct drm_encoder *encoder);
 };
 
 struct nouveau_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 1e2e9e27a03b..ca77ad001978 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -34,7 +34,6 @@
 struct nouveau_fbdev {
 	struct drm_fb_helper helper;
 	struct nouveau_framebuffer nouveau_fb;
-	struct list_head fbdev_list;
 	struct drm_device *dev;
 	unsigned int saved_flags;
 	struct nvif_object surf2d;
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 89dc4ce63490..6ae1b3494bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (nvif_unpack(argv->v0, 0, 0, true)) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		argv->v0.owner = NVDRM_OBJECT_USIF;
+		if (argv->v0.object == 0ULL)
+			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
+		else
+			argv->v0.owner = NVDRM_OBJECT_USIF;
 	} else
 		goto done;
 
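The owner field is a routing tag: requests stamped NVDRM_OBJECT_USIF are only allowed to reach objects created through this interface. Object handle 0 names the client itself, which was not created via USIF, so it is retagged OWNER_ANY to bypass the filter. A hedged sketch of the routing idea (the check is illustrative, not the nvif dispatch code):

	static bool route_allows(u8 request_owner, u8 object_owner)
	{
		return request_owner == NVIF_IOCTL_V0_OWNER_ANY ||
		       request_owner == object_owner;
	}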
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index c053c50b346a..44e1952582aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -28,6 +28,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_fb_helper.h>
 
 #include <nvif/class.h>
 
@@ -1717,7 +1718,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
 	encoder->possible_clones = 0;
-	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
+	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
 	drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
 
 	drm_mode_connector_attach_encoder(connector, encoder);
@@ -2125,7 +2126,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
 	encoder->possible_clones = 0;
-	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
+	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
 	drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
 
 	drm_mode_connector_attach_encoder(connector, encoder);
@@ -2305,7 +2306,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
 	encoder->possible_clones = 0;
-	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
+	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
 	drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
 
 	drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index e3c783d0e2ab..caf22b589edc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -279,6 +279,12 @@ nvkm_device_pci_10de_0fe3[] = {
 };
 
 static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe4[] = {
+	{ 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_104b[] = {
 	{ 0x1043, 0x844c, "GeForce GT 625" },
 	{ 0x1043, 0x846b, "GeForce GT 625" },
@@ -689,6 +695,12 @@ nvkm_device_pci_10de_1199[] = {
 };
 
 static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e0[] = {
+	{ 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_11e3[] = {
 	{ 0x17aa, 0x3683, "GeForce GTX 760A" },
 	{}
@@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x0fe1, "GeForce GT 730M" },
 	{ 0x0fe2, "GeForce GT 745M" },
 	{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
-	{ 0x0fe4, "GeForce GT 750M" },
+	{ 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 },
 	{ 0x0fe9, "GeForce GT 750M" },
 	{ 0x0fea, "GeForce GT 755M" },
 	{ 0x0fec, "GeForce 710A" },
@@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x11c6, "GeForce GTX 650 Ti" },
 	{ 0x11c8, "GeForce GTX 650" },
 	{ 0x11cb, "GeForce GT 740" },
-	{ 0x11e0, "GeForce GTX 770M" },
+	{ 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 },
 	{ 0x11e1, "GeForce GTX 765M" },
 	{ 0x11e2, "GeForce GTX 765M" },
 	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index b5b875928aba..74de7a96c22a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
 		const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc];
 		const u32 t = timeslice_mode;
 		const u32 o = PPC_UNIT(gpc, ppc, 0);
+		if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+			continue;
 		mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
 		mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
 		bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index 194afe910d21..7dacb3cc0668 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -52,10 +52,12 @@ mmio_list_base:
 #endif
 
 #ifdef INCLUDE_CODE
+#define gpc_addr(reg,addr) /*
+*/	imm32(reg,addr) /*
+*/	or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE
 #define gpc_wr32(addr,reg) /*
+*/	gpc_addr($r14,addr) /*
 */	mov b32 $r15 reg /*
-*/	imm32($r14, addr) /*
-*/	or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /*
 */	call(nv_wr32)
 
61// reports an exception to the host 63// reports an exception to the host
@@ -161,7 +163,7 @@ init:
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
 	// figure out which, and how many, UNKs are actually present
-	imm32($r14, 0x500c30)
+	gpc_addr($r14, 0x500c30)
 	clear b32 $r2
 	clear b32 $r3
 	clear b32 $r4
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 64d07df4b8b1..bb820ff28621 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = {
 	0x02d00103,
 	0xf104bd00,
 	0xf00c30e7,
-	0x24bd50e3,
+	0xe5f050e3,
-	0x44bd34bd,
+	0xbd24bd01,
-/* 0x0430: init_unk_loop */
+/* 0x0433: init_unk_loop */
-	0xb06821f4,
+	0xf444bd34,
-	0x0bf400f6,
+	0xf6b06821,
-	0x01f7f00f,
+	0x0f0bf400,
-	0xfd04f2bb,
+	0xbb01f7f0,
-	0x30b6054f,
+	0x4ffd04f2,
-/* 0x0445: init_unk_next */
+	0x0130b605,
-	0x0120b601,
+/* 0x0448: init_unk_next */
-	0xb004e0b6,
+	0xb60120b6,
-	0x1bf40126,
+	0x26b004e0,
-/* 0x0451: init_unk_done */
+	0xe21bf401,
-	0x070380e2,
+/* 0x0454: init_unk_done */
-	0xf1080480,
+	0x80070380,
-	0xf0010027,
+	0x27f10804,
-	0x22cf0223,
+	0x23f00100,
-	0x9534bd00,
+	0x0022cf02,
-	0x07f10825,
+	0x259534bd,
-	0x03f0c000,
+	0x0007f108,
-	0x0005d001,
+	0x0103f0c0,
-	0x07f104bd,
+	0xbd0005d0,
-	0x03f0c100,
+	0x0007f104,
-	0x0005d001,
+	0x0103f0c1,
-	0x0e9804bd,
+	0xbd0005d0,
-	0x010f9800,
+	0x000e9804,
-	0x015021f5,
+	0xf5010f98,
-	0xbb002fbb,
+	0xbb015021,
-	0x0e98003f,
+	0x3fbb002f,
-	0x020f9801,
+	0x010e9800,
-	0x015021f5,
+	0xf5020f98,
-	0xfd050e98,
+	0x98015021,
-	0x2ebb00ef,
+	0xeffd050e,
-	0x003ebb00,
+	0x002ebb00,
-	0x98020e98,
+	0x98003ebb,
-	0x21f5030f,
+	0x0f98020e,
-	0x0e980150,
+	0x5021f503,
-	0x00effd07,
+	0x070e9801,
-	0xbb002ebb,
+	0xbb00effd,
-	0x35b6003e,
+	0x3ebb002e,
-	0x0007f102,
+	0x0235b600,
-	0x0103f0d3,
+	0xd30007f1,
-	0xbd0003d0,
+	0xd00103f0,
-	0x0825b604,
-	0xb60635b6,
-	0x30b60120,
-	0x0824b601,
-	0xb90834b6,
-	0x21f5022f,
-	0x2fbb02d3,
-	0x003fbb00,
-	0x010007f1,
-	0xd00203f0,
 	0x04bd0003,
-	0x29f024bd,
+	0xb60825b6,
-	0x0007f11f,
+	0x20b60635,
-	0x0203f008,
+	0x0130b601,
-	0xbd0002d0,
+	0xb60824b6,
-/* 0x0505: main */
+	0x2fb90834,
-	0x0031f404,
+	0xd321f502,
-	0xf00028f4,
+	0x002fbb02,
-	0x21f424d7,
+	0xf1003fbb,
-	0xf401f439,
+	0xf0010007,
-	0xf404e4b0,
+	0x03d00203,
-	0x81fe1e18,
+	0xbd04bd00,
-	0x0627f001,
+	0x1f29f024,
-	0x12fd20bd,
+	0x080007f1,
-	0x01e4b604,
+	0xd00203f0,
-	0xfe051efd,
+	0x04bd0002,
-	0x21f50018,
+/* 0x0508: main */
-	0x0ef405fa,
+	0xf40031f4,
-/* 0x0535: main_not_ctx_xfer */
+	0xd7f00028,
-	0x10ef94d3,
+	0x3921f424,
-	0xf501f5f0,
+	0xb0f401f4,
-	0xf4037e21,
+	0x18f404e4,
-/* 0x0542: ih */
+	0x0181fe1e,
-	0x80f9c60e,
+	0xbd0627f0,
-	0xf90188fe,
+	0x0412fd20,
-	0xf990f980,
+	0xfd01e4b6,
-	0xf9b0f9a0,
+	0x18fe051e,
-	0xf9e0f9d0,
+	0xfd21f500,
-	0xf104bdf0,
+	0xd30ef405,
-	0xf00200a7,
+/* 0x0538: main_not_ctx_xfer */
-	0xaacf00a3,
+	0xf010ef94,
-	0x04abc400,
+	0x21f501f5,
-	0xf02c0bf4,
+	0x0ef4037e,
-	0xe7f124d7,
+/* 0x0545: ih */
-	0xe3f01a00,
+	0xfe80f9c6,
-	0x00eecf00,
+	0x80f90188,
-	0x1900f7f1,
+	0xa0f990f9,
-	0xcf00f3f0,
+	0xd0f9b0f9,
-	0x21f400ff,
+	0xf0f9e0f9,
-	0x01e7f004,
+	0xa7f104bd,
-	0x1d0007f1,
+	0xa3f00200,
-	0xd00003f0,
+	0x00aacf00,
-	0x04bd000e,
+	0xf404abc4,
-/* 0x0590: ih_no_fifo */
+	0xd7f02c0b,
-	0x010007f1,
+	0x00e7f124,
-	0xd00003f0,
+	0x00e3f01a,
-	0x04bd000a,
+	0xf100eecf,
-	0xe0fcf0fc,
+	0xf01900f7,
-	0xb0fcd0fc,
+	0xffcf00f3,
-	0x90fca0fc,
+	0x0421f400,
-	0x88fe80fc,
+	0xf101e7f0,
-	0xf480fc00,
+	0xf01d0007,
-	0x01f80032,
+	0x0ed00003,
-/* 0x05b4: hub_barrier_done */
+/* 0x0593: ih_no_fifo */
-	0x9801f7f0,
+	0xf104bd00,
-	0xfebb040e,
+	0xf0010007,
-	0x02ffb904,
+	0x0ad00003,
-	0x9418e7f1,
+	0xfc04bd00,
-	0xf440e3f0,
+	0xfce0fcf0,
-	0x00f89d21,
+	0xfcb0fcd0,
-/* 0x05cc: ctx_redswitch */
+	0xfc90fca0,
-	0xf120f7f0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
 	0xf0850007,
 	0x0fd00103,
-	0xf004bd00,
+	0xf804bd00,
-/* 0x05de: ctx_redswitch_delay */
+/* 0x05fd: ctx_xfer */
-	0xe2b608e7,
+	0x0007f100,
-	0xfd1bf401,
+	0x0203f081,
-	0x0800f5f1,
+	0xbd000fd0,
-	0x0200f5f1,
+	0x0711f404,
-	0x850007f1,
+	0x05cf21f5,
-	0xd00103f0,
+/* 0x0610: ctx_xfer_not_load */
-	0x04bd000f,
+	0x026a21f5,
-/* 0x05fa: ctx_xfer */
+	0x07f124bd,
-	0x07f100f8,
+	0x03f047fc,
-	0x03f08100,
+	0x0002d002,
-	0x000fd002,
+	0x2cf004bd,
-	0x11f404bd,
+	0x0320b601,
-	0xcc21f507,
+	0x4afc07f1,
-/* 0x060d: ctx_xfer_not_load */
+	0xd00203f0,
-	0x6a21f505,
+	0x04bd0002,
-	0xf124bd02,
-	0xf047fc07,
-	0x02d00203,
-	0xf004bd00,
-	0x20b6012c,
-	0xfc07f103,
-	0x0203f04a,
-	0xbd0002d0,
-	0x01acf004,
-	0xf102a5f0,
-	0xf00000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016f21f5,
-	0xf101acf0,
-	0xf04000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x0800e7f1,
-	0x016f21f5,
 	0xf001acf0,
-	0xb7f104a5,
+	0xb7f102a5,
-	0xb3f03000,
+	0xb3f00000,
 	0x040c9850,
 	0xbb0fc4b6,
 	0x0c9800bc,
-	0x030d9802,
+	0x010d9800,
-	0xf1080f98,
+	0xf500e7f0,
-	0xf50200e7,
+	0xf0016f21,
-	0xf5016f21,
+	0xb7f101ac,
-	0xf4025e21,
+	0xb3f04000,
-	0x12f40601,
+	0x040c9850,
-/* 0x06a9: ctx_xfer_post */
+	0xbb0fc4b6,
-	0x7f21f507,
+	0x0c9800bc,
-/* 0x06ad: ctx_xfer_done */
+	0x020d9801,
-	0xb421f502,
+	0xf1060f98,
-	0x0000f805,
+	0xf50800e7,
-	0x00000000,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f030,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x020c9800,
+	0x98030d98,
+	0xe7f1080f,
+	0x21f50200,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+	0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+	0x05b721f5,
+	0x000000f8,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 2f596433c222..911976d20940 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = {
 	0x02d00103,
 	0xf104bd00,
 	0xf00c30e7,
-	0x24bd50e3,
+	0xe5f050e3,
-	0x44bd34bd,
+	0xbd24bd01,
-/* 0x0430: init_unk_loop */
+/* 0x0433: init_unk_loop */
-	0xb06821f4,
+	0xf444bd34,
-	0x0bf400f6,
+	0xf6b06821,
-	0x01f7f00f,
+	0x0f0bf400,
-	0xfd04f2bb,
+	0xbb01f7f0,
-	0x30b6054f,
+	0x4ffd04f2,
-/* 0x0445: init_unk_next */
+	0x0130b605,
-	0x0120b601,
+/* 0x0448: init_unk_next */
-	0xb004e0b6,
+	0xb60120b6,
-	0x1bf40126,
+	0x26b004e0,
-/* 0x0451: init_unk_done */
+	0xe21bf401,
-	0x070380e2,
+/* 0x0454: init_unk_done */
-	0xf1080480,
+	0x80070380,
-	0xf0010027,
+	0x27f10804,
-	0x22cf0223,
+	0x23f00100,
-	0x9534bd00,
+	0x0022cf02,
-	0x07f10825,
+	0x259534bd,
-	0x03f0c000,
+	0x0007f108,
-	0x0005d001,
+	0x0103f0c0,
-	0x07f104bd,
+	0xbd0005d0,
-	0x03f0c100,
+	0x0007f104,
-	0x0005d001,
+	0x0103f0c1,
-	0x0e9804bd,
+	0xbd0005d0,
-	0x010f9800,
+	0x000e9804,
-	0x015021f5,
+	0xf5010f98,
-	0xbb002fbb,
+	0xbb015021,
-	0x0e98003f,
+	0x3fbb002f,
-	0x020f9801,
+	0x010e9800,
-	0x015021f5,
+	0xf5020f98,
-	0xfd050e98,
+	0x98015021,
-	0x2ebb00ef,
+	0xeffd050e,
-	0x003ebb00,
+	0x002ebb00,
-	0x98020e98,
+	0x98003ebb,
-	0x21f5030f,
+	0x0f98020e,
-	0x0e980150,
+	0x5021f503,
-	0x00effd07,
+	0x070e9801,
-	0xbb002ebb,
+	0xbb00effd,
-	0x35b6003e,
+	0x3ebb002e,
-	0x0007f102,
+	0x0235b600,
-	0x0103f0d3,
+	0xd30007f1,
-	0xbd0003d0,
+	0xd00103f0,
-	0x0825b604,
-	0xb60635b6,
-	0x30b60120,
-	0x0824b601,
-	0xb90834b6,
-	0x21f5022f,
-	0x2fbb02d3,
-	0x003fbb00,
-	0x010007f1,
-	0xd00203f0,
 	0x04bd0003,
-	0x29f024bd,
+	0xb60825b6,
-	0x0007f11f,
+	0x20b60635,
-	0x0203f008,
+	0x0130b601,
-	0xbd0002d0,
+	0xb60824b6,
-/* 0x0505: main */
+	0x2fb90834,
-	0x0031f404,
+	0xd321f502,
-	0xf00028f4,
+	0x002fbb02,
-	0x21f424d7,
+	0xf1003fbb,
-	0xf401f439,
+	0xf0010007,
-	0xf404e4b0,
+	0x03d00203,
-	0x81fe1e18,
+	0xbd04bd00,
-	0x0627f001,
+	0x1f29f024,
-	0x12fd20bd,
+	0x080007f1,
-	0x01e4b604,
+	0xd00203f0,
-	0xfe051efd,
+	0x04bd0002,
-	0x21f50018,
+/* 0x0508: main */
-	0x0ef405fa,
+	0xf40031f4,
-/* 0x0535: main_not_ctx_xfer */
+	0xd7f00028,
-	0x10ef94d3,
+	0x3921f424,
-	0xf501f5f0,
+	0xb0f401f4,
-	0xf4037e21,
+	0x18f404e4,
-/* 0x0542: ih */
+	0x0181fe1e,
-	0x80f9c60e,
+	0xbd0627f0,
-	0xf90188fe,
+	0x0412fd20,
-	0xf990f980,
+	0xfd01e4b6,
-	0xf9b0f9a0,
+	0x18fe051e,
-	0xf9e0f9d0,
+	0xfd21f500,
-	0xf104bdf0,
+	0xd30ef405,
-	0xf00200a7,
+/* 0x0538: main_not_ctx_xfer */
-	0xaacf00a3,
+	0xf010ef94,
-	0x04abc400,
+	0x21f501f5,
-	0xf02c0bf4,
+	0x0ef4037e,
-	0xe7f124d7,
+/* 0x0545: ih */
-	0xe3f01a00,
+	0xfe80f9c6,
-	0x00eecf00,
+	0x80f90188,
-	0x1900f7f1,
+	0xa0f990f9,
-	0xcf00f3f0,
+	0xd0f9b0f9,
-	0x21f400ff,
+	0xf0f9e0f9,
-	0x01e7f004,
+	0xa7f104bd,
-	0x1d0007f1,
+	0xa3f00200,
-	0xd00003f0,
+	0x00aacf00,
-	0x04bd000e,
+	0xf404abc4,
-/* 0x0590: ih_no_fifo */
+	0xd7f02c0b,
-	0x010007f1,
+	0x00e7f124,
-	0xd00003f0,
+	0x00e3f01a,
-	0x04bd000a,
+	0xf100eecf,
-	0xe0fcf0fc,
+	0xf01900f7,
-	0xb0fcd0fc,
+	0xffcf00f3,
-	0x90fca0fc,
+	0x0421f400,
-	0x88fe80fc,
+	0xf101e7f0,
-	0xf480fc00,
+	0xf01d0007,
-	0x01f80032,
+	0x0ed00003,
-/* 0x05b4: hub_barrier_done */
+/* 0x0593: ih_no_fifo */
-	0x9801f7f0,
+	0xf104bd00,
-	0xfebb040e,
+	0xf0010007,
-	0x02ffb904,
+	0x0ad00003,
-	0x9418e7f1,
+	0xfc04bd00,
-	0xf440e3f0,
+	0xfce0fcf0,
-	0x00f89d21,
+	0xfcb0fcd0,
-/* 0x05cc: ctx_redswitch */
+	0xfc90fca0,
-	0xf120f7f0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
 	0xf0850007,
 	0x0fd00103,
-	0xf004bd00,
+	0xf804bd00,
-/* 0x05de: ctx_redswitch_delay */
+/* 0x05fd: ctx_xfer */
-	0xe2b608e7,
+	0x0007f100,
-	0xfd1bf401,
+	0x0203f081,
-	0x0800f5f1,
+	0xbd000fd0,
-	0x0200f5f1,
+	0x0711f404,
-	0x850007f1,
+	0x05cf21f5,
-	0xd00103f0,
+/* 0x0610: ctx_xfer_not_load */
-	0x04bd000f,
+	0x026a21f5,
-/* 0x05fa: ctx_xfer */
+	0x07f124bd,
-	0x07f100f8,
+	0x03f047fc,
-	0x03f08100,
+	0x0002d002,
-	0x000fd002,
+	0x2cf004bd,
-	0x11f404bd,
+	0x0320b601,
-	0xcc21f507,
+	0x4afc07f1,
-/* 0x060d: ctx_xfer_not_load */
+	0xd00203f0,
-	0x6a21f505,
+	0x04bd0002,
-	0xf124bd02,
-	0xf047fc07,
-	0x02d00203,
-	0xf004bd00,
-	0x20b6012c,
-	0xfc07f103,
-	0x0203f04a,
-	0xbd0002d0,
-	0x01acf004,
-	0xf102a5f0,
-	0xf00000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016f21f5,
-	0xf101acf0,
-	0xf04000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x0800e7f1,
-	0x016f21f5,
 	0xf001acf0,
-	0xb7f104a5,
+	0xb7f102a5,
-	0xb3f03000,
+	0xb3f00000,
 	0x040c9850,
 	0xbb0fc4b6,
 	0x0c9800bc,
-	0x030d9802,
+	0x010d9800,
-	0xf1080f98,
+	0xf500e7f0,
-	0xf50200e7,
+	0xf0016f21,
-	0xf5016f21,
+	0xb7f101ac,
-	0xf4025e21,
+	0xb3f04000,
-	0x12f40601,
+	0x040c9850,
-/* 0x06a9: ctx_xfer_post */
+	0xbb0fc4b6,
-	0x7f21f507,
+	0x0c9800bc,
-/* 0x06ad: ctx_xfer_done */
+	0x020d9801,
-	0xb421f502,
+	0xf1060f98,
-	0x0000f805,
+	0xf50800e7,
-	0x00000000,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f030,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x020c9800,
+	0x98030d98,
+	0xe7f1080f,
+	0x21f50200,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+	0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+	0x05b721f5,
+	0x000000f8,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index ee8e54db8fc9..1c6e11b05df2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = {
 	0x02d00103,
 	0xf104bd00,
 	0xf00c30e7,
-	0x24bd50e3,
+	0xe5f050e3,
-	0x44bd34bd,
+	0xbd24bd01,
-/* 0x0430: init_unk_loop */
+/* 0x0433: init_unk_loop */
-	0xb06821f4,
+	0xf444bd34,
-	0x0bf400f6,
+	0xf6b06821,
-	0x01f7f00f,
+	0x0f0bf400,
-	0xfd04f2bb,
+	0xbb01f7f0,
-	0x30b6054f,
+	0x4ffd04f2,
-/* 0x0445: init_unk_next */
+	0x0130b605,
-	0x0120b601,
+/* 0x0448: init_unk_next */
-	0xb004e0b6,
+	0xb60120b6,
-	0x1bf40226,
+	0x26b004e0,
-/* 0x0451: init_unk_done */
+	0xe21bf402,
-	0x070380e2,
+/* 0x0454: init_unk_done */
-	0xf1080480,
+	0x80070380,
-	0xf0010027,
+	0x27f10804,
-	0x22cf0223,
+	0x23f00100,
-	0x9534bd00,
+	0x0022cf02,
-	0x07f10825,
+	0x259534bd,
-	0x03f0c000,
+	0x0007f108,
-	0x0005d001,
+	0x0103f0c0,
-	0x07f104bd,
+	0xbd0005d0,
-	0x03f0c100,
+	0x0007f104,
-	0x0005d001,
+	0x0103f0c1,
-	0x0e9804bd,
+	0xbd0005d0,
-	0x010f9800,
+	0x000e9804,
-	0x015021f5,
+	0xf5010f98,
-	0xbb002fbb,
+	0xbb015021,
-	0x0e98003f,
+	0x3fbb002f,
-	0x020f9801,
+	0x010e9800,
-	0x015021f5,
+	0xf5020f98,
-	0xfd050e98,
+	0x98015021,
-	0x2ebb00ef,
+	0xeffd050e,
-	0x003ebb00,
+	0x002ebb00,
-	0x98020e98,
+	0x98003ebb,
-	0x21f5030f,
+	0x0f98020e,
-	0x0e980150,
+	0x5021f503,
-	0x00effd07,
+	0x070e9801,
-	0xbb002ebb,
+	0xbb00effd,
-	0x35b6003e,
+	0x3ebb002e,
-	0x0007f102,
+	0x0235b600,
-	0x0103f0d3,
+	0xd30007f1,
-	0xbd0003d0,
+	0xd00103f0,
-	0x0825b604,
-	0xb60635b6,
-	0x30b60120,
-	0x0824b601,
-	0xb90834b6,
-	0x21f5022f,
-	0x2fbb02d3,
-	0x003fbb00,
-	0x010007f1,
-	0xd00203f0,
 	0x04bd0003,
-	0x29f024bd,
+	0xb60825b6,
-	0x0007f11f,
+	0x20b60635,
-	0x0203f030,
+	0x0130b601,
-	0xbd0002d0,
+	0xb60824b6,
-/* 0x0505: main */
+	0x2fb90834,
-	0x0031f404,
+	0xd321f502,
-	0xf00028f4,
+	0x002fbb02,
-	0x21f424d7,
+	0xf1003fbb,
-	0xf401f439,
+	0xf0010007,
-	0xf404e4b0,
+	0x03d00203,
-	0x81fe1e18,
+	0xbd04bd00,
-	0x0627f001,
+	0x1f29f024,
-	0x12fd20bd,
+	0x300007f1,
-	0x01e4b604,
+	0xd00203f0,
-	0xfe051efd,
+	0x04bd0002,
-	0x21f50018,
+/* 0x0508: main */
-	0x0ef405fa,
+	0xf40031f4,
-/* 0x0535: main_not_ctx_xfer */
+	0xd7f00028,
-	0x10ef94d3,
+	0x3921f424,
-	0xf501f5f0,
+	0xb0f401f4,
-	0xf4037e21,
+	0x18f404e4,
-/* 0x0542: ih */
+	0x0181fe1e,
-	0x80f9c60e,
+	0xbd0627f0,
-	0xf90188fe,
+	0x0412fd20,
-	0xf990f980,
+	0xfd01e4b6,
-	0xf9b0f9a0,
+	0x18fe051e,
-	0xf9e0f9d0,
+	0xfd21f500,
-	0xf104bdf0,
+	0xd30ef405,
-	0xf00200a7,
+/* 0x0538: main_not_ctx_xfer */
-	0xaacf00a3,
+	0xf010ef94,
-	0x04abc400,
+	0x21f501f5,
-	0xf02c0bf4,
+	0x0ef4037e,
-	0xe7f124d7,
+/* 0x0545: ih */
-	0xe3f01a00,
+	0xfe80f9c6,
-	0x00eecf00,
+	0x80f90188,
-	0x1900f7f1,
+	0xa0f990f9,
-	0xcf00f3f0,
+	0xd0f9b0f9,
-	0x21f400ff,
+	0xf0f9e0f9,
-	0x01e7f004,
+	0xa7f104bd,
-	0x1d0007f1,
+	0xa3f00200,
-	0xd00003f0,
+	0x00aacf00,
-	0x04bd000e,
+	0xf404abc4,
-/* 0x0590: ih_no_fifo */
+	0xd7f02c0b,
-	0x010007f1,
+	0x00e7f124,
-	0xd00003f0,
+	0x00e3f01a,
-	0x04bd000a,
+	0xf100eecf,
-	0xe0fcf0fc,
+	0xf01900f7,
-	0xb0fcd0fc,
+	0xffcf00f3,
-	0x90fca0fc,
+	0x0421f400,
-	0x88fe80fc,
+	0xf101e7f0,
-	0xf480fc00,
+	0xf01d0007,
-	0x01f80032,
+	0x0ed00003,
-/* 0x05b4: hub_barrier_done */
+/* 0x0593: ih_no_fifo */
-	0x9801f7f0,
+	0xf104bd00,
-	0xfebb040e,
+	0xf0010007,
-	0x02ffb904,
+	0x0ad00003,
-	0x9418e7f1,
+	0xfc04bd00,
-	0xf440e3f0,
+	0xfce0fcf0,
-	0x00f89d21,
+	0xfcb0fcd0,
-/* 0x05cc: ctx_redswitch */
+	0xfc90fca0,
-	0xf120f7f0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
 	0xf0850007,
 	0x0fd00103,
-	0xf004bd00,
+	0xf804bd00,
-/* 0x05de: ctx_redswitch_delay */
+/* 0x05fd: ctx_xfer */
-	0xe2b608e7,
+	0x0007f100,
-	0xfd1bf401,
+	0x0203f081,
-	0x0800f5f1,
+	0xbd000fd0,
-	0x0200f5f1,
+	0x0711f404,
-	0x850007f1,
+	0x05cf21f5,
-	0xd00103f0,
+/* 0x0610: ctx_xfer_not_load */
-	0x04bd000f,
+	0x026a21f5,
-/* 0x05fa: ctx_xfer */
+	0x07f124bd,
-	0x07f100f8,
+	0x03f047fc,
-	0x03f08100,
+	0x0002d002,
-	0x000fd002,
+	0x2cf004bd,
-	0x11f404bd,
+	0x0320b601,
-	0xcc21f507,
+	0x4afc07f1,
-/* 0x060d: ctx_xfer_not_load */
+	0xd00203f0,
-	0x6a21f505,
+	0x04bd0002,
-	0xf124bd02,
-	0xf047fc07,
-	0x02d00203,
-	0xf004bd00,
-	0x20b6012c,
-	0xfc07f103,
-	0x0203f04a,
-	0xbd0002d0,
-	0x01acf004,
-	0xf102a5f0,
-	0xf00000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016f21f5,
-	0xf101acf0,
-	0xf04000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x0800e7f1,
-	0x016f21f5,
 	0xf001acf0,
-	0xb7f104a5,
+	0xb7f102a5,
-	0xb3f03000,
+	0xb3f00000,
 	0x040c9850,
 	0xbb0fc4b6,
 	0x0c9800bc,
-	0x030d9802,
+	0x010d9800,
-	0xf1080f98,
+	0xf500e7f0,
-	0xf50200e7,
+	0xf0016f21,
-	0xf5016f21,
+	0xb7f101ac,
-	0xf4025e21,
+	0xb3f04000,
-	0x12f40601,
+	0x040c9850,
-/* 0x06a9: ctx_xfer_post */
+	0xbb0fc4b6,
-	0x7f21f507,
+	0x0c9800bc,
-/* 0x06ad: ctx_xfer_done */
+	0x020d9801,
-	0xb421f502,
+	0xf1060f98,
-	0x0000f805,
+	0xf50800e7,
-	0x00000000,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f030,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x020c9800,
+	0x98030d98,
+	0xe7f1080f,
+	0x21f50200,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+	0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+	0x05b721f5,
+	0x000000f8,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index fbcc342f896f..84af7ec6a78e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
 	0x02020014,
 	0xf6120040,
 	0x04bd0002,
-	0xfe048141,
+	0xfe048441,
 	0x00400010,
 	0x0000f607,
 	0x040204bd,
@@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = {
 	0x01c90080,
 	0xbd0002f6,
 	0x0c308e04,
-	0xbd24bd50,
+	0x01e5f050,
-/* 0x0383: init_unk_loop */
+	0x34bd24bd,
-	0x7e44bd34,
+/* 0x0386: init_unk_loop */
-	0xb0000065,
+	0x657e44bd,
-	0x0bf400f6,
+	0xf6b00000,
-	0xbb010f0e,
+	0x0e0bf400,
-	0x4ffd04f2,
+	0xf2bb010f,
-	0x0130b605,
+	0x054ffd04,
-/* 0x0398: init_unk_next */
+/* 0x039b: init_unk_next */
-	0xb60120b6,
+	0xb60130b6,
-	0x26b004e0,
+	0xe0b60120,
-	0xe21bf401,
+	0x0126b004,
-/* 0x03a4: init_unk_done */
+/* 0x03a7: init_unk_done */
-	0xb50703b5,
+	0xb5e21bf4,
-	0x00820804,
+	0x04b50703,
-	0x22cf0201,
+	0x01008208,
-	0x9534bd00,
+	0x0022cf02,
-	0x00800825,
+	0x259534bd,
-	0x05f601c0,
+	0xc0008008,
-	0x8004bd00,
+	0x0005f601,
-	0xf601c100,
+	0x008004bd,
-	0x04bd0005,
+	0x05f601c1,
-	0x98000e98,
+	0x9804bd00,
-	0x207e010f,
+	0x0f98000e,
-	0x2fbb0001,
+	0x01207e01,
-	0x003fbb00,
+	0x002fbb00,
-	0x98010e98,
+	0x98003fbb,
-	0x207e020f,
+	0x0f98010e,
-	0x0e980001,
+	0x01207e02,
-	0x00effd05,
+	0x050e9800,
-	0xbb002ebb,
+	0xbb00effd,
-	0x0e98003e,
+	0x3ebb002e,
-	0x030f9802,
+	0x020e9800,
-	0x0001207e,
+	0x7e030f98,
-	0xfd070e98,
+	0x98000120,
-	0x2ebb00ef,
+	0xeffd070e,
-	0x003ebb00,
+	0x002ebb00,
-	0x800235b6,
+	0xb6003ebb,
-	0xf601d300,
+	0x00800235,
-	0x04bd0003,
+	0x03f601d3,
-	0xb60825b6,
+	0xb604bd00,
-	0x20b60635,
+	0x35b60825,
-	0x0130b601,
+	0x0120b606,
-	0xb60824b6,
+	0xb60130b6,
-	0x2fb20834,
+	0x34b60824,
-	0x0002687e,
+	0x7e2fb208,
-	0xbb002fbb,
+	0xbb000268,
-	0x0080003f,
+	0x3fbb002f,
-	0x03f60201,
+	0x01008000,
-	0xbd04bd00,
+	0x0003f602,
-	0x1f29f024,
+	0x24bd04bd,
-	0x02300080,
+	0x801f29f0,
-	0xbd0002f6,
+	0xf6023000,
-/* 0x0445: main */
+	0x04bd0002,
-	0x0031f404,
+/* 0x0448: main */
-	0x0d0028f4,
+	0xf40031f4,
-	0x00377e24,
+	0x240d0028,
-	0xf401f400,
+	0x0000377e,
-	0xf404e4b0,
+	0xb0f401f4,
-	0x81fe1d18,
+	0x18f404e4,
-	0xbd060201,
+	0x0181fe1d,
-	0x0412fd20,
+	0x20bd0602,
-	0xfd01e4b6,
+	0xb60412fd,
-	0x18fe051e,
+	0x1efd01e4,
-	0x05187e00,
+	0x0018fe05,
-	0xd40ef400,
+	0x00051b7e,
-/* 0x0474: main_not_ctx_xfer */
+/* 0x0477: main_not_ctx_xfer */
-	0xf010ef94,
+	0x94d40ef4,
-	0xf87e01f5,
+	0xf5f010ef,
-	0x0ef40002,
+	0x02f87e01,
-/* 0x0481: ih */
+	0xc70ef400,
-	0xfe80f9c7,
+/* 0x0484: ih */
-	0x80f90188,
+	0x88fe80f9,
-	0xa0f990f9,
+	0xf980f901,
-	0xd0f9b0f9,
+	0xf9a0f990,
-	0xf0f9e0f9,
+	0xf9d0f9b0,
-	0x004a04bd,
+	0xbdf0f9e0,
-	0x00aacf02,
+	0x02004a04,
-	0xf404abc4,
+	0xc400aacf,
-	0x240d1f0b,
+	0x0bf404ab,
-	0xcf1a004e,
+	0x4e240d1f,
-	0x004f00ee,
+	0xeecf1a00,
-	0x00ffcf19,
+	0x19004f00,
-	0x0000047e,
+	0x7e00ffcf,
-	0x0040010e,
+	0x0e000004,
-	0x000ef61d,
+	0x1d004001,
-/* 0x04be: ih_no_fifo */
+	0xbd000ef6,
-	0x004004bd,
+/* 0x04c1: ih_no_fifo */
-	0x000af601,
+	0x01004004,
-	0xf0fc04bd,
+	0xbd000af6,
-	0xd0fce0fc,
+	0xfcf0fc04,
-	0xa0fcb0fc,
+	0xfcd0fce0,
-	0x80fc90fc,
+	0xfca0fcb0,
-	0xfc0088fe,
+	0xfe80fc90,
-	0x0032f480,
+	0x80fc0088,
-/* 0x04de: hub_barrier_done */
+	0xf80032f4,
-	0x010f01f8,
+/* 0x04e1: hub_barrier_done */
-	0xbb040e98,
+	0x98010f01,
-	0xffb204fe,
+	0xfebb040e,
-	0x4094188e,
+	0x8effb204,
-	0x00008f7e,
+	0x7e409418,
-/* 0x04f2: ctx_redswitch */
+	0xf800008f,
-	0x200f00f8,
+/* 0x04f5: ctx_redswitch */
+	0x80200f00,
+	0xf6018500,
+	0x04bd000f,
+/* 0x0502: ctx_redswitch_delay */
+	0xe2b6080e,
+	0xfd1bf401,
+	0x0800f5f1,
+	0x0200f5f1,
 	0x01850080,
 	0xbd000ff6,
-/* 0x04ff: ctx_redswitch_delay */
+/* 0x051b: ctx_xfer */
-	0xb6080e04,
+	0x8000f804,
-	0x1bf401e2,
+	0xf6028100,
-	0x00f5f1fd,
+	0x04bd000f,
-	0x00f5f108,
+	0x7e0711f4,
-	0x85008002,
+/* 0x052b: ctx_xfer_not_load */
-	0x000ff601,
+	0x7e0004f5,
-	0x00f804bd,
+	0xbd000216,
-/* 0x0518: ctx_xfer */
+	0x47fc8024,
-	0x02810080,
-	0xbd000ff6,
-	0x0711f404,
-	0x0004f27e,
-/* 0x0528: ctx_xfer_not_load */
-	0x0002167e,
-	0xfc8024bd,
-	0x02f60247,
-	0xf004bd00,
-	0x20b6012c,
-	0x4afc8003,
 	0x0002f602,
-	0xacf004bd,
+	0x2cf004bd,
-	0x02a5f001,
+	0x0320b601,
-	0x5000008b,
+	0x024afc80,
-	0xb6040c98,
+	0xbd0002f6,
-	0xbcbb0fc4,
+	0x01acf004,
-	0x000c9800,
+	0x8b02a5f0,
-	0x0e010d98,
+	0x98500000,
-	0x013d7e00,
-	0x01acf000,
-	0x5040008b,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x010c9800,
-	0x98020d98,
-	0x004e060f,
-	0x013d7e08,
-	0x01acf000,
-	0x8b04a5f0,
-	0x98503000,
 	0xc4b6040c,
 	0x00bcbb0f,
-	0x98020c98,
+	0x98000c98,
-	0x0f98030d,
+	0x000e010d,
-	0x02004e08,
 	0x00013d7e,
-	0x00020a7e,
+	0x8b01acf0,
-	0xf40601f4,
+	0x98504000,
-/* 0x05b2: ctx_xfer_post */
+	0xc4b6040c,
-	0x277e0712,
+	0x00bcbb0f,
-/* 0x05b6: ctx_xfer_done */
+	0x98010c98,
-	0xde7e0002,
+	0x0f98020d,
-	0x00f80004,
+	0x08004e06,
-	0x00000000,
+	0x00013d7e,
+	0xf001acf0,
+	0x008b04a5,
+	0x0c985030,
+	0x0fc4b604,
+	0x9800bcbb,
+	0x0d98020c,
+	0x080f9803,
+	0x7e02004e,
+	0x7e00013d,
+	0xf400020a,
+	0x12f40601,
+/* 0x05b5: ctx_xfer_post */
+	0x02277e07,
+/* 0x05b9: ctx_xfer_done */
+	0x04e17e00,
+	0x0000f800,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 51f5c3c6e966..11bf363a6ae9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = {
 	0x020014fe,
 	0x12004002,
 	0xbd0002f6,
-	0x05b04104,
+	0x05b34104,
 	0x400010fe,
 	0x00f60700,
 	0x0204bd00,
@@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = {
 	0xc900800f,
 	0x0002f601,
 	0x308e04bd,
-	0x24bd500c,
+	0xe5f0500c,
-	0x44bd34bd,
+	0xbd24bd01,
-/* 0x03b0: init_unk_loop */
+/* 0x03b3: init_unk_loop */
-	0x0000657e,
+	0x7e44bd34,
-	0xf400f6b0,
+	0xb0000065,
-	0x010f0e0b,
+	0x0bf400f6,
-	0xfd04f2bb,
+	0xbb010f0e,
-	0x30b6054f,
+	0x4ffd04f2,
-/* 0x03c5: init_unk_next */
+	0x0130b605,
-	0x0120b601,
+/* 0x03c8: init_unk_next */
-	0xb004e0b6,
+	0xb60120b6,
-	0x1bf40226,
+	0x26b004e0,
-/* 0x03d1: init_unk_done */
+	0xe21bf402,
-	0x0703b5e2,
+/* 0x03d4: init_unk_done */
-	0x820804b5,
+	0xb50703b5,
-	0xcf020100,
+	0x00820804,
-	0x34bd0022,
+	0x22cf0201,
-	0x80082595,
+	0x9534bd00,
-	0xf601c000,
+	0x00800825,
+	0x05f601c0,
+	0x8004bd00,
+	0xf601c100,
 	0x04bd0005,
-	0x01c10080,
+	0x98000e98,
-	0xbd0005f6,
+	0x207e010f,
-	0x000e9804,
+	0x2fbb0001,
-	0x7e010f98,
+	0x003fbb00,
-	0xbb000120,
+	0x98010e98,
-	0x3fbb002f,
+	0x207e020f,
-	0x010e9800,
+	0x0e980001,
-	0x7e020f98,
+	0x00effd05,
-	0x98000120,
+	0xbb002ebb,
-	0xeffd050e,
+	0x0e98003e,
-	0x002ebb00,
+	0x030f9802,
-	0x98003ebb,
+	0x0001207e,
-	0x0f98020e,
+	0xfd070e98,
-	0x01207e03,
+	0x2ebb00ef,
-	0x070e9800,
+	0x003ebb00,
-	0xbb00effd,
+	0x800235b6,
-	0x3ebb002e,
+	0xf601d300,
-	0x0235b600,
+	0x04bd0003,
-	0x01d30080,
+	0xb60825b6,
-	0xbd0003f6,
+	0x20b60635,
-	0x0825b604,
+	0x0130b601,
-	0xb60635b6,
+	0xb60824b6,
-	0x30b60120,
+	0x2fb20834,
-	0x0824b601,
+	0x0002687e,
-	0xb20834b6,
+	0xbb002fbb,
-	0x02687e2f,
+	0x3f0f003f,
-	0x002fbb00,
+	0x501d608e,
-	0x0f003fbb,
+	0xb201e5f0,
-	0x8effb23f,
+	0x008f7eff,
-	0xf0501d60,
+	0x8e0c0f00,
-	0x8f7e01e5,
-	0x0c0f0000,
-	0xa88effb2,
-	0xe5f0501d,
-	0x008f7e01,
-	0x03147e00,
-	0xb23f0f00,
-	0x1d608eff,
-	0x01e5f050,
-	0x00008f7e,
-	0xffb2000f,
-	0x501d9c8e,
-	0x7e01e5f0,
-	0x0f00008f,
-	0x03147e01,
-	0x8effb200,
 	0xf0501da8,
-	0x8f7e01e5,
+	0xffb201e5,
-	0xff0f0000,
+	0x00008f7e,
-	0x988effb2,
+	0x0003147e,
+	0x608e3f0f,
 	0xe5f0501d,
-	0x008f7e01,
+	0x7effb201,
-	0xb2020f00,
+	0x0f00008f,
-	0x1da88eff,
+	0x1d9c8e00,
 	0x01e5f050,
-	0x00008f7e,
+	0x8f7effb2,
+	0x010f0000,
 	0x0003147e,
-	0x85050498,
+	0x501da88e,
-	0x98504000,
+	0xb201e5f0,
-	0x64b60406,
+	0x008f7eff,
-	0x0056bb0f,
+	0x8eff0f00,
-/* 0x04e0: tpc_strand_init_tpc_loop */
+	0xf0501d98,
-	0x05705eb8,
+	0xffb201e5,
-	0x00657e00,
-	0xbdf6b200,
-/* 0x04ed: tpc_strand_init_idx_loop */
-	0x605eb874,
-	0x7fb20005,
-	0x00008f7e,
-	0x05885eb8,
-	0x082f9500,
-	0x00008f7e,
-	0x058c5eb8,
-	0x082f9500,
 	0x00008f7e,
-	0x05905eb8,
+	0xa88e020f,
-	0x00657e00,
-	0x06f5b600,
-	0xb601f0b6,
-	0x2fbb08f4,
-	0x003fbb00,
-	0xb60170b6,
-	0x1bf40162,
-	0x0050b7bf,
-	0x0142b608,
-	0x0fa81bf4,
-	0x8effb23f,
-	0xf0501d60,
-	0x8f7e01e5,
-	0x0d0f0000,
-	0xa88effb2,
 	0xe5f0501d,
-	0x008f7e01,
+	0x7effb201,
-	0x03147e00,
+	0x7e00008f,
-	0x01008000,
+	0x98000314,
-	0x0003f602,
+	0x00850504,
-	0x24bd04bd,
+	0x06985040,
-	0x801f29f0,
+	0x0f64b604,
-	0xf6023000,
+/* 0x04e3: tpc_strand_init_tpc_loop */
-	0x04bd0002,
+	0xb80056bb,
-/* 0x0574: main */
+	0x0005705e,
-	0xf40031f4,
+	0x0000657e,
-	0x240d0028,
+	0x74bdf6b2,
-	0x0000377e,
+/* 0x04f0: tpc_strand_init_idx_loop */
-	0xb0f401f4,
+	0x05605eb8,
-	0x18f404e4,
+	0x7e7fb200,
-	0x0181fe1d,
+	0xb800008f,
-	0x20bd0602,
+	0x0005885e,
-	0xb60412fd,
+	0x7e082f95,
-	0x1efd01e4,
+	0xb800008f,
-	0x0018fe05,
+	0x00058c5e,
-	0x0006477e,
+	0x7e082f95,
-/* 0x05a3: main_not_ctx_xfer */
+	0xb800008f,
-	0x94d40ef4,
+	0x0005905e,
-	0xf5f010ef,
+	0x0000657e,
-	0x02f87e01,
+	0xb606f5b6,
-	0xc70ef400,
+	0xf4b601f0,
-/* 0x05b0: ih */
+	0x002fbb08,
-	0x88fe80f9,
+	0xb6003fbb,
-	0xf980f901,
+	0x62b60170,
-	0xf9a0f990,
+	0xbf1bf401,
-	0xf9d0f9b0,
+	0x080050b7,
-	0xbdf0f9e0,
+	0xf40142b6,
-	0x02004a04,
+	0x3f0fa81b,
-	0xc400aacf,
+	0x501d608e,
-	0x0bf404ab,
+	0xb201e5f0,
-	0x4e240d1f,
+	0x008f7eff,
-	0xeecf1a00,
+	0x8e0d0f00,
-	0x19004f00,
+	0xf0501da8,
-	0x7e00ffcf,
+	0xffb201e5,
-	0x0e000004,
+	0x00008f7e,
-	0x1d004001,
+	0x0003147e,
-	0xbd000ef6,
+	0x02010080,
-/* 0x05ed: ih_no_fifo */
+	0xbd0003f6,
-	0x01004004,
+	0xf024bd04,
-	0xbd000af6,
+	0x00801f29,
-	0xfcf0fc04,
+	0x02f60230,
-	0xfcd0fce0,
+/* 0x0577: main */
-	0xfca0fcb0,
+	0xf404bd00,
-	0xfe80fc90,
+	0x28f40031,
-	0x80fc0088,
+	0x7e240d00,
-	0xf80032f4,
+	0xf4000037,
-/* 0x060d: hub_barrier_done */
+	0xe4b0f401,
-	0x98010f01,
+	0x1d18f404,
-	0xfebb040e,
+	0x020181fe,
-	0x8effb204,
+	0xfd20bd06,
-	0x7e409418,
+	0xe4b60412,
-	0xf800008f,
+	0x051efd01,
-/* 0x0621: ctx_redswitch */
+	0x7e0018fe,
-	0x80200f00,
+	0xf400064a,
+/* 0x05a6: main_not_ctx_xfer */
+	0xef94d40e,
+	0x01f5f010,
+	0x0002f87e,
+/* 0x05b3: ih */
+	0xf9c70ef4,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0xcf02004a,
+	0xabc400aa,
+	0x1f0bf404,
+	0x004e240d,
+	0x00eecf1a,
+	0xcf19004f,
+	0x047e00ff,
+	0x010e0000,
+	0xf61d0040,
+	0x04bd000e,
+/* 0x05f0: ih_no_fifo */
+	0xf6010040,
+	0x04bd000a,
+	0xe0fcf0fc,
+	0xb0fcd0fc,
+	0x90fca0fc,
+	0x88fe80fc,
+	0xf480fc00,
+	0x01f80032,
+/* 0x0610: hub_barrier_done */
+	0x0e98010f,
+	0x04febb04,
+	0x188effb2,
+	0x8f7e4094,
+	0x00f80000,
+/* 0x0624: ctx_redswitch */
+	0x0080200f,
+	0x0ff60185,
+	0x0e04bd00,
+/* 0x0631: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0x800200f5,
 	0xf6018500,
 	0x04bd000f,
-/* 0x062e: ctx_redswitch_delay */
+/* 0x064a: ctx_xfer */
-	0xe2b6080e,
+	0x008000f8,
-	0xfd1bf401,
+	0x0ff60281,
-	0x0800f5f1,
+	0x8e04bd00,
-	0x0200f5f1,
+	0xf0501dc4,
-	0x01850080,
+	0xffb201e5,
-	0xbd000ff6,
+	0x00008f7e,
-/* 0x0647: ctx_xfer */
+	0x7e0711f4,
-	0x8000f804,
+/* 0x0667: ctx_xfer_not_load */
-	0xf6028100,
+	0x7e000624,
-	0x04bd000f,
+	0xbd000216,
-	0xc48effb2,
+	0x47fc8024,
-	0xe5f0501d,
-	0x008f7e01,
-	0x0711f400,
-	0x0006217e,
-/* 0x0664: ctx_xfer_not_load */
-	0x0002167e,
-	0xfc8024bd,
-	0x02f60247,
-	0xf004bd00,
-	0x20b6012c,
-	0x4afc8003,
 	0x0002f602,
-	0x0c0f04bd,
+	0x2cf004bd,
-	0xa88effb2,
+	0x0320b601,
-	0xe5f0501d,
+	0x024afc80,
-	0x008f7e01,
+	0xbd0002f6,
-	0x03147e00,
+	0x8e0c0f04,
-	0xb23f0f00,
+	0xf0501da8,
-	0x1d608eff,
+	0xffb201e5,
-	0x01e5f050,
 	0x00008f7e,
-	0xffb2000f,
+	0x0003147e,
-	0x501d9c8e,
+	0x608e3f0f,
-	0x7e01e5f0,
+	0xe5f0501d,
+	0x7effb201,
 	0x0f00008f,
-	0x03147e01,
+	0x1d9c8e00,
-	0x01fcf000,
-	0xb203f0b6,
-	0x1da88eff,
 	0x01e5f050,
-	0x00008f7e,
+	0x8f7effb2,
-	0xf001acf0,
+	0x010f0000,
-	0x008b02a5,
+	0x0003147e,
-	0x0c985000,
+	0xb601fcf0,
-	0x0fc4b604,
+	0xa88e03f0,
-	0x9800bcbb,
+	0xe5f0501d,
-	0x0d98000c,
+	0x7effb201,
-	0x7e000e01,
+	0xf000008f,
-	0xf000013d,
-	0x008b01ac,
-	0x0c985040,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x7e08004e,
-	0xf000013d,
 	0xa5f001ac,
-	0x30008b04,
+	0x00008b02,
 	0x040c9850,
 	0xbb0fc4b6,
 	0x0c9800bc,
-	0x030d9802,
+	0x010d9800,
-	0x4e080f98,
+	0x3d7e000e,
-	0x3d7e0200,
+	0xacf00001,
-	0x0a7e0001,
+	0x40008b01,
-	0x147e0002,
+	0x040c9850,
-	0x01f40003,
+	0xbb0fc4b6,
-	0x1a12f406,
+	0x0c9800bc,
-/* 0x073c: ctx_xfer_post */
+	0x020d9801,
-	0x0002277e,
+	0x4e060f98,
-	0xffb20d0f,
+	0x3d7e0800,
-	0x501da88e,
+	0xacf00001,
-	0x7e01e5f0,
+	0x04a5f001,
-	0x7e00008f,
+	0x5030008b,
-/* 0x0753: ctx_xfer_done */
+	0xb6040c98,
-	0x7e000314,
+	0xbcbb0fc4,
-	0xf800060d,
+	0x020c9800,
-	0x00000000,
+	0x98030d98,
+	0x004e080f,
+	0x013d7e02,
+	0x020a7e00,
+	0x03147e00,
+	0x0601f400,
+/* 0x073f: ctx_xfer_post */
+	0x7e1a12f4,
+	0x0f000227,
+	0x1da88e0d,
+	0x01e5f050,
+	0x8f7effb2,
+	0x147e0000,
+/* 0x0756: ctx_xfer_done */
+	0x107e0003,
+	0x00f80006,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dda7a7d224c9..9f5dfc85147a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 static int
 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr *gr = (void *)object->engine;
+	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
 	union {
 		struct fermi_a_zbc_color_v0 v0;
 	} *args = data;
@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 static int
 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr *gr = (void *)object->engine;
+	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
 	union {
 		struct fermi_a_zbc_depth_v0 v0;
 	} *args = data;
@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 		gr->ppc_nr[i] = gr->func->ppc_nr;
 		for (j = 0; j < gr->ppc_nr[i]; j++) {
 			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			if (mask)
+				gr->ppc_mask[i] |= (1 << j);
 			gr->ppc_tpc_nr[i][j] = hweight8(mask);
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 4611961b1187..02e78b8d93f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -97,6 +97,7 @@ struct gf100_gr {
 	u8 tpc_nr[GPC_MAX];
 	u8 tpc_total;
 	u8 ppc_nr[GPC_MAX];
+	u8 ppc_mask[GPC_MAX];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
 	struct nvkm_memory *unk4188b4;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 895ba74057d4..1d7dd38292b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -97,7 +97,9 @@ static void *
 nvkm_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	spin_lock(&iobj->imem->lock);
 	list_del(&iobj->head);
+	spin_unlock(&iobj->imem->lock);
 	nvkm_memory_del(&iobj->parent);
 	return iobj;
 }
@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
 		iobj->parent = memory;
 		iobj->imem = imem;
+		spin_lock(&iobj->imem->lock);
 		list_add_tail(&iobj->head, &imem->list);
+		spin_unlock(&iobj->imem->lock);
 		memory = &iobj->memory;
 	}
 
@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
 {
 	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
 	imem->func = func;
+	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index b61509e26ec9..b735173a18ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
 	duty = (uv - bios->base) * div / bios->pwm_range;
 
 	nvkm_wr32(device, 0x20340, div);
-	nvkm_wr32(device, 0x20344, 0x8000000 | duty);
+	nvkm_wr32(device, 0x20344, 0x80000000 | duty);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index ad09590e8a46..2ed0754ed19e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -524,7 +524,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 	omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
 
 	ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
-					&omap_crtc_funcs);
+					&omap_crtc_funcs, NULL);
 	if (ret < 0) {
 		kfree(omap_crtc);
 		return NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 5c367aad8a6e..130fca70bfd7 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -172,9 +172,9 @@ void copy_timings_drm_to_omap(struct omap_video_timings *timings,
 uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
 		uint32_t max_formats, enum omap_color_mode supported_modes);
 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
-		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
-		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
 int omap_framebuffer_pin(struct drm_framebuffer *fb);
 void omap_framebuffer_unpin(struct drm_framebuffer *fb);
@@ -248,7 +248,7 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
 
 static inline int objects_lookup(struct drm_device *dev,
 		struct drm_file *filp, uint32_t pixel_format,
-		struct drm_gem_object **bos, uint32_t *handles)
+		struct drm_gem_object **bos, const uint32_t *handles)
 {
 	int i, n = drm_format_num_planes(pixel_format);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 7d9b32a0eb43..0c104ad7ef66 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -178,7 +178,7 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
 	encoder = &omap_encoder->base;
 
 	drm_encoder_init(dev, encoder, &omap_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);
 	drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
 
 	return encoder;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 636a1f921569..ad202dfc1a49 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -364,7 +364,7 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 #endif
 
 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
-		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *bos[4];
 	struct drm_framebuffer *fb;
@@ -386,7 +386,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
 }
 
 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
-		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
 {
 	struct omap_framebuffer *omap_fb = NULL;
 	struct drm_framebuffer *fb = NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3054bda72688..d5ecabd6c14c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -366,7 +366,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 
 	ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
 				       &omap_plane_funcs, omap_plane->formats,
-				       omap_plane->nformats, type);
+				       omap_plane->nformats, type, NULL);
 	if (ret < 0)
 		goto error;
 
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 183aea1abebc..86276519b2ef 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -521,7 +521,7 @@ static const struct drm_framebuffer_funcs qxl_fb_funcs = {
 int
 qxl_framebuffer_init(struct drm_device *dev,
 		     struct qxl_framebuffer *qfb,
-		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     const struct drm_mode_fb_cmd2 *mode_cmd,
 		     struct drm_gem_object *obj)
 {
 	int ret;
@@ -876,16 +876,6 @@ static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
 	.best_encoder = qxl_best_encoder,
 };
 
-static void qxl_conn_save(struct drm_connector *connector)
-{
-	DRM_DEBUG("\n");
-}
-
-static void qxl_conn_restore(struct drm_connector *connector)
-{
-	DRM_DEBUG("\n");
-}
-
 static enum drm_connector_status qxl_conn_detect(
 			struct drm_connector *connector,
 			bool force)
@@ -932,10 +922,8 @@ static void qxl_conn_destroy(struct drm_connector *connector)
 
 static const struct drm_connector_funcs qxl_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = qxl_conn_save,
-	.restore = qxl_conn_restore,
 	.detect = qxl_conn_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes_nomerge,
+	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = qxl_conn_set_property,
 	.destroy = qxl_conn_destroy,
 };
@@ -980,7 +968,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 			       &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
 
 	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
-			 DRM_MODE_ENCODER_VIRTUAL);
+			 DRM_MODE_ENCODER_VIRTUAL, NULL);
 
 	/* we get HPD via client monitors config */
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -1003,7 +991,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 static struct drm_framebuffer *
 qxl_user_framebuffer_create(struct drm_device *dev,
 			    struct drm_file *file_priv,
-			    struct drm_mode_fb_cmd2 *mode_cmd)
+			    const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
 	struct qxl_framebuffer *qxl_fb;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 01a86948eb8c..6e6b9b1519b8 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -390,7 +390,7 @@ void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
 int
 qxl_framebuffer_init(struct drm_device *dev,
 		     struct qxl_framebuffer *rfb,
-		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     const struct drm_mode_fb_cmd2 *mode_cmd,
 		     struct drm_gem_object *obj);
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
 void qxl_send_monitors_config(struct qxl_device *qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index c4a552637c93..7136e521e6db 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -40,7 +40,6 @@
 struct qxl_fbdev {
 	struct drm_fb_helper helper;
 	struct qxl_framebuffer qfb;
-	struct list_head fbdev_list;
 	struct qxl_device *qdev;
 
 	spinlock_t delayed_ops_lock;
@@ -283,7 +282,7 @@ int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
 }
 
 static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
-				      struct drm_mode_fb_cmd2 *mode_cmd,
+				      const struct drm_mode_fb_cmd2 *mode_cmd,
 				      struct drm_gem_object **gobj_p)
 {
 	struct qxl_device *qdev = qfbdev->qdev;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..801dd60ac192 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -25,6 +25,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/radeon_drm.h>
 #include <drm/drm_fixed.h>
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index bb292143997e..01b20e14a247 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2767,23 +2767,27 @@ radeon_add_atom_encoder(struct drm_device *dev,
 	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 			radeon_encoder->rmx_type = RMX_FULL;
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
 			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
 		} else {
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
 			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
 		}
 		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+				 DRM_MODE_ENCODER_DAC, NULL);
 		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
 		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+				 DRM_MODE_ENCODER_TVDAC, NULL);
 		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
 		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
 		break;
@@ -2797,13 +2801,16 @@ radeon_add_atom_encoder(struct drm_device *dev,
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 			radeon_encoder->rmx_type = RMX_FULL;
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
 			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
 		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
 			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
 		} else {
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
 			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
 		}
 		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
@@ -2820,11 +2827,14 @@ radeon_add_atom_encoder(struct drm_device *dev,
 		/* these are handled by the primary encoders */
 		radeon_encoder->is_ext_encoder = true;
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
 		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
 		else
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
 		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 248953d2fdb7..0154db43860c 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8472,7 +8472,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_reset) {
 		rdev->needs_reset = true;
 		wake_up_all(&rdev->fence_queue);
@@ -9630,6 +9630,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 	    (rdev->disp_priority == 2)) {
 		DRM_DEBUG_KMS("force priority to high\n");
 	}
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}

 	/* select wm A */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7f33767d7ed6..2ad462896896 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}

 	/* select wm A */
@@ -5344,7 +5347,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 238b13f045c1..9e7e2bf03b81 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
 		status = r100_irq_ack(rdev);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 	uint32_t pixel_bytes1 = 0;
 	uint32_t pixel_bytes2 = 0;

+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
 	if (!rdev->mode_info.mode_config_initialized)
 		return;

@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
 			      (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
 	}
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+		rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+		rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }

 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4ea5b10ff5f4..cc2fdf0be37a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4276,7 +4276,7 @@ restart_ih:
 		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b6cbd816537e..87db64983ea8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2414,7 +2414,7 @@ struct radeon_device {
 	struct r600_ih ih; /* r6/700 interrupt ring */
 	struct radeon_rlc rlc;
 	struct radeon_mec mec;
-	struct work_struct hotplug_work;
+	struct delayed_work hotplug_work;
 	struct work_struct dp_work;
 	struct work_struct audio_work;
 	int num_crtc; /* number of crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index fe994aac3b04..c77d349c561c 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
 	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
 	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
 		PCI_VENDOR_ID_IBM, 0x0550, 1},
+	/* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x054d, 1},
 	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
 	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
 		PCI_VENDOR_ID_IBM, 0x0530, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5a2cafb4f1bc..340f3f549f29 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	if (r < 0)
 		return connector_status_disconnected;

+	if (radeon_connector->detected_hpd_without_ddc) {
+		force = true;
+		radeon_connector->detected_hpd_without_ddc = false;
+	}
+
 	if (!force && radeon_check_hpd_status_unchanged(connector)) {
 		ret = connector->status;
 		goto exit;
 	}

-	if (radeon_connector->ddc_bus)
+	if (radeon_connector->ddc_bus) {
 		dret = radeon_ddc_probe(radeon_connector, false);
+
+		/* Sometimes the pins required for the DDC probe on DVI
+		 * connectors don't make contact at the same time that the ones
+		 * for HPD do. If the DDC probe fails even though we had an HPD
+		 * signal, try again later */
+		if (!dret && !force &&
+		    connector->status != connector_status_connected) {
+			DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+			radeon_connector->detected_hpd_without_ddc = true;
+			schedule_delayed_work(&rdev->hotplug_work,
+					      msecs_to_jiffies(1000));
+			goto exit;
+		}
+	}
 	if (dret) {
 		radeon_connector->detected_by_load = false;
 		radeon_connector_free_edid(connector);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a8d9927ed9eb..b3bb92368ae0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 	 * to complete in this vblank?
 	 */
 	if (update_pending &&
-	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
+							       crtc_id,
+							       USE_REAL_VBLANKSTART,
 							       &vpos, &hpos, NULL, NULL,
 							       &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	struct drm_crtc *crtc = &radeon_crtc->base;
 	unsigned long flags;
 	int r;
+	int vpos, hpos, stat, min_udelay;
+	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];

 	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	/* set the proper interrupt */
 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

+	/* If this happens to execute within the "virtually extended" vblank
+	 * interval before the start of the real vblank interval then it needs
+	 * to delay programming the mmio flip until the real vblank is entered.
+	 * This prevents completing a flip too early due to the way we fudge
+	 * our vblank counter and vblank timestamps in order to work around the
+	 * problem that the hw fires vblank interrupts before actual start of
+	 * vblank (when line buffer refilling is done for a frame). It
+	 * complements the fudging logic in radeon_get_crtc_scanoutpos() for
+	 * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
+	 *
+	 * In practice this won't execute very often unless on very fast
+	 * machines because the time window for this to happen is very small.
+	 */
+	for (;;) {
+		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+		 * start in hpos, and to the "fudged earlier" vblank start in
+		 * vpos.
+		 */
+		stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
+						  GET_DISTANCE_TO_VBLANKSTART,
+						  &vpos, &hpos, NULL, NULL,
+						  &crtc->hwmode);
+
+		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+		    !(vpos >= 0 && hpos <= 0))
+			break;
+
+		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+		usleep_range(min_udelay, 2 * min_udelay);
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	};
+
 	/* do the flip (mmio) */
 	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);

@@ -1292,7 +1331,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
 int
 radeon_framebuffer_init(struct drm_device *dev,
 			struct radeon_framebuffer *rfb,
-			struct drm_mode_fb_cmd2 *mode_cmd,
+			const struct drm_mode_fb_cmd2 *mode_cmd,
 			struct drm_gem_object *obj)
 {
 	int ret;
@@ -1309,7 +1348,7 @@ radeon_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 radeon_user_framebuffer_create(struct drm_device *dev,
 			       struct drm_file *file_priv,
-			       struct drm_mode_fb_cmd2 *mode_cmd)
+			       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
 	struct radeon_framebuffer *radeon_fb;
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 		vbl_end = 0;
 	}

+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from real vbl_start in *hpos */
+		*hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
 	/* Test scanout position against vblank region. */
 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
 		in_vbl = false;

+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
 	/* Check if inside vblank area and apply corrective offsets:
 	 * vpos will then be >=0 in video scanout area, but negative
 	 * within vblank area, counting down the number of lines until
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	/* Correct for shifted end of vbl at vbl_end. */
 	*vpos = *vpos - vbl_end;

-	/* In vblank? */
-	if (in_vbl)
-		ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-	/* Is vpos outside nominal vblank area, but less than
-	 * 1/100 of a frame height away from start of vblank?
-	 * If so, assume this isn't a massively delayed vblank
-	 * interrupt, but a vblank interrupt that fired a few
-	 * microseconds before true start of vblank. Compensate
-	 * by adding a full frame duration to the final timestamp.
-	 * Happens, e.g., on ATI R500, R600.
-	 *
-	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
-	 */
-	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-		vbl_start = mode->crtc_vdisplay;
-		vtotal = mode->crtc_vtotal;
-
-		if (vbl_start - *vpos < vtotal / 100) {
-			*vpos -= vtotal;
-
-			/* Signal this correction as "applied". */
-			ret |= 0x8;
-		}
-	}
-
 	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 744f5c49c664..94323f51ffcf 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -641,7 +641,7 @@ radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
 	}

 	drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
-			 DRM_MODE_ENCODER_DPMST);
+			 DRM_MODE_ENCODER_DPMST, NULL);
 	drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);

 	mst_enc = radeon_encoder->enc_priv;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 26da2f4d7b4f..adc44bbc81a9 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -44,7 +44,6 @@
 struct radeon_fbdev {
 	struct drm_fb_helper helper;
 	struct radeon_framebuffer rfb;
-	struct list_head fbdev_list;
 	struct radeon_device *rdev;
 };

diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 171d3e43c30c..979f3bf65f2c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
 static void radeon_hotplug_work_func(struct work_struct *work)
 {
 	struct radeon_device *rdev = container_of(work, struct radeon_device,
-						  hotplug_work);
+						  hotplug_work.work);
 	struct drm_device *dev = rdev->ddev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 		}
 	}

-	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+	INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
 	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);

@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
 	if (r) {
 		rdev->irq.installed = false;
-		flush_work(&rdev->hotplug_work);
+		flush_delayed_work(&rdev->hotplug_work);
 		return r;
 	}

@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 		rdev->irq.installed = false;
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
-		flush_work(&rdev->hotplug_work);
+		flush_delayed_work(&rdev->hotplug_work);
 	}
 }

diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0ec6fcca16d3..d290a8a09036 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
  */
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
 {
+	int vpos, hpos, stat;
+	u32 count;
 	struct radeon_device *rdev = dev->dev_private;

 	if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
 		return -EINVAL;
 	}

-	return radeon_get_vblank_counter(rdev, crtc);
+	/* The hw increments its frame counter at start of vsync, not at start
+	 * of vblank, as is required by DRM core vblank counter handling.
+	 * Cook the hw count here to make it appear to the caller as if it
+	 * incremented at start of vblank. We measure distance to start of
+	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+	 * result by 1 to give the proper appearance to caller.
+	 */
+	if (rdev->mode_info.crtcs[crtc]) {
+		/* Repeat readout if needed to provide stable result if
+		 * we cross start of vsync during the queries.
+		 */
+		do {
+			count = radeon_get_vblank_counter(rdev, crtc);
+			/* Ask radeon_get_crtc_scanoutpos to return vpos as
+			 * distance to start of vblank, instead of regular
+			 * vertical scanout pos.
+			 */
+			stat = radeon_get_crtc_scanoutpos(
+				dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
+				&vpos, &hpos, NULL, NULL,
+				&rdev->mode_info.crtcs[crtc]->base.hwmode);
+		} while (count != radeon_get_vblank_counter(rdev, crtc));
+
+		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+		}
+		else {
+			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+				      crtc, vpos);
+
+			/* Bump counter if we are at >= leading edge of vblank,
+			 * but before vsync where vpos would turn negative and
+			 * the hw counter really increments.
+			 */
+			if (vpos >= 0)
+				count++;
+		}
+	}
+	else {
+		/* Fallback to use value as is. */
+		count = radeon_get_vblank_counter(rdev, crtc);
+		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+	}
+
+	return count;
 }

 /**
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 678b4386540d..32b338ff436b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -25,6 +25,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/radeon_drm.h>
 #include <drm/drm_fixed.h>
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 30de43366eae..88dc973fb209 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1772,7 +1772,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
 	switch (radeon_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
 		encoder->possible_crtcs = 0x1;
-		drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+		drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs,
+				 DRM_MODE_ENCODER_LVDS, NULL);
 		drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
 		if (rdev->is_atom_bios)
 			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
@@ -1781,12 +1782,14 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
 			radeon_encoder->rmx_type = RMX_FULL;
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs,
+				 DRM_MODE_ENCODER_TMDS, NULL);
 		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
 		radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-		drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+		drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs,
+				 DRM_MODE_ENCODER_DAC, NULL);
 		drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
 		if (rdev->is_atom_bios)
 			radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
@@ -1794,7 +1797,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
 			radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-		drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs,
+				 DRM_MODE_ENCODER_TVDAC, NULL);
 		drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
 		if (rdev->is_atom_bios)
 			radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
@@ -1802,7 +1806,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
 			radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs,
+				 DRM_MODE_ENCODER_TMDS, NULL);
 		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
 		if (!rdev->is_atom_bios)
 			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 830e171c3a9e..cddd41b32eda 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -367,6 +367,7 @@ struct radeon_crtc {
 	u32 line_time;
 	u32 wm_low;
 	u32 wm_high;
+	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
 	enum radeon_output_csc output_csc;
 };
@@ -553,6 +554,7 @@ struct radeon_connector {
 	void *con_priv;
 	bool dac_load_detect;
 	bool detected_by_load; /* if the connection status was determined by load */
+	bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
 	uint16_t connector_object_id;
 	struct radeon_hpd hpd;
 	struct radeon_router router;
@@ -686,6 +688,9 @@ struct atom_voltage_table
 	struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
 };

+/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART		(1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)

 extern void
 radeon_add_atom_connector(struct drm_device *dev,
@@ -929,7 +934,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green
 			      u16 *blue, int regno);
 int radeon_framebuffer_init(struct drm_device *dev,
 			    struct radeon_framebuffer *rfb,
-			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    const struct drm_mode_fb_cmd2 *mode_cmd,
 			    struct drm_gem_object *obj);

 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3024883b844..84d45633d28c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
 	if (!(rdev->flags & RADEON_IS_PCIE))
 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 	 */
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 	/* Don't try to enable write-combining when it can't work, or things
 	 * may be slow
@@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining

-	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
-		      "better performance thanks to write-combining\n");
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #endif

 	radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6d80dde23400..59abebd6b5dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
 		ret = device_create_file(rdev->dev, &dev_attr_power_method);
 		if (ret)
 			DRM_ERROR("failed to create device file for power method\n");
-		if (!ret)
-			rdev->pm.sysfs_initialized = true;
+		rdev->pm.sysfs_initialized = true;
 	}

 	mutex_lock(&rdev->pm.mutex);
@@ -1757,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+								crtc,
+								USE_REAL_VBLANKSTART,
 								&vpos, &hpos, NULL, NULL,
 								&rdev->mode_info.crtcs[crtc]->base.hwmode);
 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 97a904835759..6244f4e44e9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
 		status = rs600_irq_ack(rdev);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (rdev->msi_enabled) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 516ca27cfa12..6bc44c24e837 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
 {
 	u32 tmp;

+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
 	/*
 	 * Line Buffer Setup
 	 * There is a single line buffer shared by both display controllers.
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
 		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
 	}
 	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+		rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+		rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }

 struct rs690_watermark {
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
index 3f5e1cf138ba..d37ba2cb886e 100644
--- a/drivers/gpu/drm/radeon/rv730_dpm.c
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev)
 	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);

 	if (result != PPSMC_Result_OK)
-		DRM_ERROR("Could not force DPM to low\n");
+		DRM_DEBUG("Could not force DPM to low\n");

 	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index b9c770745a7a..e830c8935db0 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev)
 	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);

 	if (result != PPSMC_Result_OK)
-		DRM_ERROR("Could not force DPM to low.\n");
+		DRM_DEBUG("Could not force DPM to low.\n");

 	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
 int rv770_set_sw_state(struct radeon_device *rdev)
 {
 	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
-		return -EINVAL;
+		DRM_DEBUG("rv770_set_sw_state failed\n");
 	return 0;
 }

diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 07037e32dea3..f878d6962da5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}

 	/* select wm A */
@@ -6848,7 +6851,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e72bf46042e0..a82b891ae1fe 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
-	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 48cb19949ca3..88a4b706be16 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -613,7 +613,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)

 	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
 					&rgrp->planes[index % 2].plane,
-					NULL, &crtc_funcs);
+					NULL, &crtc_funcs, NULL);
 	if (ret < 0)
 		return ret;

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index d0ae1e8009c6..c08700757feb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -173,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
 		goto done;
 	} else {
 		ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
-				       encoder_type);
+				       encoder_type, NULL);
 		if (ret < 0)
 			goto done;

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 81da8419282b..11267de26a51 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -151,7 +151,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
 		goto error;

 	ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
-			       DRM_MODE_ENCODER_TMDS);
+			       DRM_MODE_ENCODER_TMDS, NULL);
 	if (ret < 0)
 		goto error;

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index ca12e8ca5552..43bce69d8560 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -136,7 +136,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,

 static struct drm_framebuffer *
 rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
-		  struct drm_mode_fb_cmd2 *mode_cmd)
+		  const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct rcar_du_device *rcdu = dev->dev_private;
 	const struct rcar_du_format_info *format;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index ffa583712cd9..c3ed9522c0e1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -410,7 +410,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)

 		ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
 					       &rcar_du_plane_funcs, formats,
-					       ARRAY_SIZE(formats), type);
+					       ARRAY_SIZE(formats), type,
+					       NULL);
 		if (ret < 0)
 			return ret;

diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 80d6fc8a5cee..525b5a81e96e 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -295,7 +295,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,

 	drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
 	drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);

 	return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 002645bb5bbf..b8ac5911c102 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -72,7 +72,7 @@ static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
 };

 static struct rockchip_drm_fb *
-rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
+rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
 		  struct drm_gem_object **obj, unsigned int num_planes)
 {
 	struct rockchip_drm_fb *rockchip_fb;
@@ -102,7 +102,7 @@ rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,

 static struct drm_framebuffer *
 rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
-			struct drm_mode_fb_cmd2 *mode_cmd)
+			const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct rockchip_drm_fb *rockchip_fb;
 	struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
@@ -173,7 +173,7 @@ static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {

 struct drm_framebuffer *
 rockchip_drm_framebuffer_init(struct drm_device *dev,
-			      struct drm_mode_fb_cmd2 *mode_cmd,
+			      const struct drm_mode_fb_cmd2 *mode_cmd,
 			      struct drm_gem_object *obj)
 {
 	struct rockchip_drm_fb *rockchip_fb;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index 09574d48226f..2fe47f1ee98f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -17,7 +17,7 @@

 struct drm_framebuffer *
 rockchip_drm_framebuffer_init(struct drm_device *dev,
-			      struct drm_mode_fb_cmd2 *mode_cmd,
+			      const struct drm_mode_fb_cmd2 *mode_cmd,
 			      struct drm_gem_object *obj);
 void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8caea0a33dd8..d908321b94ce 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_pgoff = 0;

 	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
 			     obj->size, &rk_obj->dma_attrs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d8ae5e49c44..dd8e0860ad4e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = {
 		.data = &rk3288_vop },
 	{},
 };
+MODULE_DEVICE_TABLE(of, vop_driver_dt_match);

 static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
 {
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
 	val = (dest.y2 - dest.y1 - 1) << 16;
 	val |= (dest.x2 - dest.x1 - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, dsp_info, val);
-	val = (dsp_sty - 1) << 16;
-	val |= (dsp_stx - 1) & 0xffff;
+	val = dsp_sty << 16;
+	val |= dsp_stx & 0xffff;
 	VOP_WIN_SET(vop, win, dsp_st, val);
 	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win,

 	if (state->event) {
 		spin_lock_irqsave(&drm->event_lock, flags);
-		drm_send_vblank_event(drm, -1, state->event);
+		drm_crtc_send_vblank_event(crtc, state->event);
 		spin_unlock_irqrestore(&drm->event_lock, flags);
 	}

@@ -1477,7 +1478,7 @@ static int vop_create_crtc(struct vop *vop)
 					       0, &vop_plane_funcs,
 					       win_data->phy->data_formats,
 					       win_data->phy->nformats,
-					       win_data->type);
+					       win_data->type, NULL);
 		if (ret) {
 			DRM_ERROR("failed to initialize plane\n");
 			goto err_cleanup_planes;
@@ -1491,7 +1492,7 @@ static int vop_create_crtc(struct vop *vop)
1491 } 1492 }
1492 1493
1493 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1494 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1494 &vop_crtc_funcs); 1495 &vop_crtc_funcs, NULL);
1495 if (ret) 1496 if (ret)
1496 return ret; 1497 return ret;
1497 1498
@@ -1514,7 +1515,7 @@ static int vop_create_crtc(struct vop *vop)
1514 &vop_plane_funcs, 1515 &vop_plane_funcs,
1515 win_data->phy->data_formats, 1516 win_data->phy->data_formats,
1516 win_data->phy->nformats, 1517 win_data->phy->nformats,
1517 win_data->type); 1518 win_data->type, NULL);
1518 if (ret) { 1519 if (ret) {
1519 DRM_ERROR("failed to initialize overlay plane\n"); 1520 DRM_ERROR("failed to initialize overlay plane\n");
1520 goto err_cleanup_crtc; 1521 goto err_cleanup_crtc;
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop)
1575 return PTR_ERR(vop->dclk); 1576 return PTR_ERR(vop->dclk);
1576 } 1577 }
1577 1578
1578 ret = clk_prepare(vop->hclk);
1579 if (ret < 0) {
1580 dev_err(vop->dev, "failed to prepare hclk\n");
1581 return ret;
1582 }
1583
1584 ret = clk_prepare(vop->dclk); 1579 ret = clk_prepare(vop->dclk);
1585 if (ret < 0) { 1580 if (ret < 0) {
1586 dev_err(vop->dev, "failed to prepare dclk\n"); 1581 dev_err(vop->dev, "failed to prepare dclk\n");
1587 goto err_unprepare_hclk; 1582 return ret;
1588 } 1583 }
1589 1584
1590	ret = clk_prepare(vop->aclk);			 1585	/* Enable both the hclk and aclk to set up the vop */
1586 ret = clk_prepare_enable(vop->hclk);
1591 if (ret < 0) { 1587 if (ret < 0) {
1592 dev_err(vop->dev, "failed to prepare aclk\n"); 1588 dev_err(vop->dev, "failed to prepare/enable hclk\n");
1593 goto err_unprepare_dclk; 1589 goto err_unprepare_dclk;
1594 } 1590 }
1595 1591
1596 /* 1592 ret = clk_prepare_enable(vop->aclk);
1597 * enable hclk, so that we can config vop register.
1598 */
1599 ret = clk_enable(vop->hclk);
1600 if (ret < 0) { 1593 if (ret < 0) {
1601 dev_err(vop->dev, "failed to prepare aclk\n"); 1594 dev_err(vop->dev, "failed to prepare/enable aclk\n");
1602 goto err_unprepare_aclk; 1595 goto err_disable_hclk;
1603 } 1596 }
1597
1604 /* 1598 /*
1605	 * do hclk_reset, which resets all vop registers.	 1599	 * do hclk_reset, which resets all vop registers.
1606 */ 1600 */
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop)
1608 if (IS_ERR(ahb_rst)) { 1602 if (IS_ERR(ahb_rst)) {
1609 dev_err(vop->dev, "failed to get ahb reset\n"); 1603 dev_err(vop->dev, "failed to get ahb reset\n");
1610 ret = PTR_ERR(ahb_rst); 1604 ret = PTR_ERR(ahb_rst);
1611 goto err_disable_hclk; 1605 goto err_disable_aclk;
1612 } 1606 }
1613 reset_control_assert(ahb_rst); 1607 reset_control_assert(ahb_rst);
1614 usleep_range(10, 20); 1608 usleep_range(10, 20);
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop)
1634 if (IS_ERR(vop->dclk_rst)) { 1628 if (IS_ERR(vop->dclk_rst)) {
1635 dev_err(vop->dev, "failed to get dclk reset\n"); 1629 dev_err(vop->dev, "failed to get dclk reset\n");
1636 ret = PTR_ERR(vop->dclk_rst); 1630 ret = PTR_ERR(vop->dclk_rst);
1637 goto err_unprepare_aclk; 1631 goto err_disable_aclk;
1638 } 1632 }
1639 reset_control_assert(vop->dclk_rst); 1633 reset_control_assert(vop->dclk_rst);
1640 usleep_range(10, 20); 1634 usleep_range(10, 20);
1641 reset_control_deassert(vop->dclk_rst); 1635 reset_control_deassert(vop->dclk_rst);
1642 1636
1643 clk_disable(vop->hclk); 1637 clk_disable(vop->hclk);
1638 clk_disable(vop->aclk);
1644 1639
1645 vop->is_enabled = false; 1640 vop->is_enabled = false;
1646 1641
1647 return 0; 1642 return 0;
1648 1643
1644err_disable_aclk:
1645 clk_disable_unprepare(vop->aclk);
1649err_disable_hclk: 1646err_disable_hclk:
1650 clk_disable(vop->hclk); 1647 clk_disable_unprepare(vop->hclk);
1651err_unprepare_aclk:
1652 clk_unprepare(vop->aclk);
1653err_unprepare_dclk: 1648err_unprepare_dclk:
1654 clk_unprepare(vop->dclk); 1649 clk_unprepare(vop->dclk);
1655err_unprepare_hclk:
1656 clk_unprepare(vop->hclk);
1657 return ret; 1650 return ret;
1658} 1651}
1659 1652
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e9272b0a8592..b80802f55143 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -613,7 +613,7 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
613 encoder->possible_crtcs = 1; 613 encoder->possible_crtcs = 1;
614 614
615 ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs, 615 ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
616 DRM_MODE_ENCODER_LVDS); 616 DRM_MODE_ENCODER_LVDS, NULL);
617 if (ret < 0) 617 if (ret < 0)
618 return ret; 618 return ret;
619 619
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index aaf98ace4a90..388a0fc13564 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -104,7 +104,7 @@ const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
104 104
105static struct drm_framebuffer * 105static struct drm_framebuffer *
106shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv, 106shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
107 struct drm_mode_fb_cmd2 *mode_cmd) 107 const struct drm_mode_fb_cmd2 *mode_cmd)
108{ 108{
109 const struct shmob_drm_format_info *format; 109 const struct shmob_drm_format_info *format;
110 110
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 3ae09dcd4fd8..de11c7cfb02f 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -367,7 +367,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
367 int res; 367 int res;
368 368
369 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 369 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
370 &sti_crtc_funcs); 370 &sti_crtc_funcs, NULL);
371 if (res) { 371 if (res) {
372 DRM_ERROR("Can't initialze CRTC\n"); 372 DRM_ERROR("Can't initialze CRTC\n");
373 return -EINVAL; 373 return -EINVAL;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index dd1032195051..807863106b8d 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -272,7 +272,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
272 &sti_plane_helpers_funcs, 272 &sti_plane_helpers_funcs,
273 cursor_supported_formats, 273 cursor_supported_formats,
274 ARRAY_SIZE(cursor_supported_formats), 274 ARRAY_SIZE(cursor_supported_formats),
275 DRM_PLANE_TYPE_CURSOR); 275 DRM_PLANE_TYPE_CURSOR, NULL);
276 if (res) { 276 if (res) {
277 DRM_ERROR("Failed to initialize universal plane\n"); 277 DRM_ERROR("Failed to initialize universal plane\n");
278 goto err_plane; 278 goto err_plane;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index c85dc7d6b005..f9a1d92c9d95 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -630,7 +630,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
630 &sti_plane_helpers_funcs, 630 &sti_plane_helpers_funcs,
631 gdp_supported_formats, 631 gdp_supported_formats,
632 ARRAY_SIZE(gdp_supported_formats), 632 ARRAY_SIZE(gdp_supported_formats),
633 type); 633 type, NULL);
634 if (res) { 634 if (res) {
635 DRM_ERROR("Failed to initialize universal plane\n"); 635 DRM_ERROR("Failed to initialize universal plane\n");
636 goto err; 636 goto err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index d735daccd458..49cce833f2c8 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -543,8 +543,6 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
543 count++; 543 count++;
544 } 544 }
545 545
546 drm_mode_sort(&connector->modes);
547
548 return count; 546 return count;
549} 547}
550 548
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index ea0690bc77d5..43861b52261d 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -973,7 +973,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
973 &sti_plane_helpers_funcs, 973 &sti_plane_helpers_funcs,
974 hqvdp_supported_formats, 974 hqvdp_supported_formats,
975 ARRAY_SIZE(hqvdp_supported_formats), 975 ARRAY_SIZE(hqvdp_supported_formats),
976 DRM_PLANE_TYPE_OVERLAY); 976 DRM_PLANE_TYPE_OVERLAY, NULL);
977 if (res) { 977 if (res) {
978 DRM_ERROR("Failed to initialize universal plane\n"); 978 DRM_ERROR("Failed to initialize universal plane\n");
979 return NULL; 979 return NULL;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c8a4c5dae2b6..f2afcf5438b8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -512,7 +512,8 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
512 drm_encoder->possible_clones = 1 << 0; 512 drm_encoder->possible_clones = 1 << 0;
513 513
514 drm_encoder_init(dev, drm_encoder, 514 drm_encoder_init(dev, drm_encoder,
515 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS); 515 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS,
516 NULL);
516 517
517 drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs); 518 drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs);
518 519
@@ -564,7 +565,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
564 drm_encoder->possible_clones = 1 << 0; 565 drm_encoder->possible_clones = 1 << 0;
565 566
566 drm_encoder_init(dev, drm_encoder, 567 drm_encoder_init(dev, drm_encoder,
567 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC); 568 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
568 569
569 drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs); 570 drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
570 571
@@ -613,7 +614,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
613 drm_encoder->possible_clones = 1 << 1; 614 drm_encoder->possible_clones = 1 << 1;
614 615
615 drm_encoder_init(dev, drm_encoder, 616 drm_encoder_init(dev, drm_encoder,
616 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS); 617 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL);
617 618
618 drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs); 619 drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
619 620
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 74d9d621453d..63ebb154b9b5 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -16,18 +16,6 @@ config DRM_TEGRA
16 16
17if DRM_TEGRA 17if DRM_TEGRA
18 18
19config DRM_TEGRA_FBDEV
20 bool "Enable legacy fbdev support"
21 select DRM_KMS_FB_HELPER
22 select FB_SYS_FILLRECT
23 select FB_SYS_COPYAREA
24 select FB_SYS_IMAGEBLIT
25 default y
26 help
27 Choose this option if you have a need for the legacy fbdev support.
28 Note that this support also provides the Linux console on top of
29 the Tegra modesetting driver.
30
31config DRM_TEGRA_DEBUG 19config DRM_TEGRA_DEBUG
32 bool "NVIDIA Tegra DRM debug support" 20 bool "NVIDIA Tegra DRM debug support"
33 help 21 help
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index e9f24a85a103..1f5cb68357c7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -660,7 +660,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
660 660
661 err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, 661 err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
662 &tegra_primary_plane_funcs, formats, 662 &tegra_primary_plane_funcs, formats,
663 num_formats, DRM_PLANE_TYPE_PRIMARY); 663 num_formats, DRM_PLANE_TYPE_PRIMARY,
664 NULL);
664 if (err < 0) { 665 if (err < 0) {
665 kfree(plane); 666 kfree(plane);
666 return ERR_PTR(err); 667 return ERR_PTR(err);
@@ -827,7 +828,8 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
827 828
828 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, 829 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
829 &tegra_cursor_plane_funcs, formats, 830 &tegra_cursor_plane_funcs, formats,
830 num_formats, DRM_PLANE_TYPE_CURSOR); 831 num_formats, DRM_PLANE_TYPE_CURSOR,
832 NULL);
831 if (err < 0) { 833 if (err < 0) {
832 kfree(plane); 834 kfree(plane);
833 return ERR_PTR(err); 835 return ERR_PTR(err);
@@ -890,7 +892,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
890 892
891 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, 893 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
892 &tegra_overlay_plane_funcs, formats, 894 &tegra_overlay_plane_funcs, formats,
893 num_formats, DRM_PLANE_TYPE_OVERLAY); 895 num_formats, DRM_PLANE_TYPE_OVERLAY,
896 NULL);
894 if (err < 0) { 897 if (err < 0) {
895 kfree(plane); 898 kfree(plane);
896 return ERR_PTR(err); 899 return ERR_PTR(err);
@@ -1732,7 +1735,7 @@ static int tegra_dc_init(struct host1x_client *client)
1732 } 1735 }
1733 1736
1734 err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor, 1737 err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
1735 &tegra_crtc_funcs); 1738 &tegra_crtc_funcs, NULL);
1736 if (err < 0) 1739 if (err < 0)
1737 goto cleanup; 1740 goto cleanup;
1738 1741
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 159ef515cab1..e0f827790a5e 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -106,7 +106,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
106 106
107static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { 107static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
108 .fb_create = tegra_fb_create, 108 .fb_create = tegra_fb_create,
109#ifdef CONFIG_DRM_TEGRA_FBDEV 109#ifdef CONFIG_DRM_FBDEV_EMULATION
110 .output_poll_changed = tegra_fb_output_poll_changed, 110 .output_poll_changed = tegra_fb_output_poll_changed,
111#endif 111#endif
112 .atomic_check = drm_atomic_helper_check, 112 .atomic_check = drm_atomic_helper_check,
@@ -260,7 +260,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
260 260
261static void tegra_drm_lastclose(struct drm_device *drm) 261static void tegra_drm_lastclose(struct drm_device *drm)
262{ 262{
263#ifdef CONFIG_DRM_TEGRA_FBDEV 263#ifdef CONFIG_DRM_FBDEV_EMULATION
264 struct tegra_drm *tegra = drm->dev_private; 264 struct tegra_drm *tegra = drm->dev_private;
265 265
266 tegra_fbdev_restore_mode(tegra->fbdev); 266 tegra_fbdev_restore_mode(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ec49275ffb24..d88a2d18c1a4 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -30,7 +30,7 @@ struct tegra_fb {
30 unsigned int num_planes; 30 unsigned int num_planes;
31}; 31};
32 32
33#ifdef CONFIG_DRM_TEGRA_FBDEV 33#ifdef CONFIG_DRM_FBDEV_EMULATION
34struct tegra_fbdev { 34struct tegra_fbdev {
35 struct drm_fb_helper base; 35 struct drm_fb_helper base;
36 struct tegra_fb *fb; 36 struct tegra_fb *fb;
@@ -46,7 +46,7 @@ struct tegra_drm {
46 struct mutex clients_lock; 46 struct mutex clients_lock;
47 struct list_head clients; 47 struct list_head clients;
48 48
49#ifdef CONFIG_DRM_TEGRA_FBDEV 49#ifdef CONFIG_DRM_FBDEV_EMULATION
50 struct tegra_fbdev *fbdev; 50 struct tegra_fbdev *fbdev;
51#endif 51#endif
52 52
@@ -268,12 +268,12 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
268 struct tegra_bo_tiling *tiling); 268 struct tegra_bo_tiling *tiling);
269struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, 269struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
270 struct drm_file *file, 270 struct drm_file *file,
271 struct drm_mode_fb_cmd2 *cmd); 271 const struct drm_mode_fb_cmd2 *cmd);
272int tegra_drm_fb_prepare(struct drm_device *drm); 272int tegra_drm_fb_prepare(struct drm_device *drm);
273void tegra_drm_fb_free(struct drm_device *drm); 273void tegra_drm_fb_free(struct drm_device *drm);
274int tegra_drm_fb_init(struct drm_device *drm); 274int tegra_drm_fb_init(struct drm_device *drm);
275void tegra_drm_fb_exit(struct drm_device *drm); 275void tegra_drm_fb_exit(struct drm_device *drm);
276#ifdef CONFIG_DRM_TEGRA_FBDEV 276#ifdef CONFIG_DRM_FBDEV_EMULATION
277void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); 277void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
278void tegra_fb_output_poll_changed(struct drm_device *drm); 278void tegra_fb_output_poll_changed(struct drm_device *drm);
279#endif 279#endif
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f0a138ef68ce..50d46ae3786b 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1023,7 +1023,7 @@ static int tegra_dsi_init(struct host1x_client *client)
1023 1023
1024 drm_encoder_init(drm, &dsi->output.encoder, 1024 drm_encoder_init(drm, &dsi->output.encoder,
1025 &tegra_dsi_encoder_funcs, 1025 &tegra_dsi_encoder_funcs,
1026 DRM_MODE_ENCODER_DSI); 1026 DRM_MODE_ENCODER_DSI, NULL);
1027 drm_encoder_helper_add(&dsi->output.encoder, 1027 drm_encoder_helper_add(&dsi->output.encoder,
1028 &tegra_dsi_encoder_helper_funcs); 1028 &tegra_dsi_encoder_helper_funcs);
1029 1029
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 1004075fd088..ede9e94f3312 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -18,7 +18,7 @@ static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
18 return container_of(fb, struct tegra_fb, base); 18 return container_of(fb, struct tegra_fb, base);
19} 19}
20 20
21#ifdef CONFIG_DRM_TEGRA_FBDEV 21#ifdef CONFIG_DRM_FBDEV_EMULATION
22static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper) 22static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
23{ 23{
24 return container_of(helper, struct tegra_fbdev, base); 24 return container_of(helper, struct tegra_fbdev, base);
@@ -92,7 +92,7 @@ static struct drm_framebuffer_funcs tegra_fb_funcs = {
92}; 92};
93 93
94static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, 94static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
95 struct drm_mode_fb_cmd2 *mode_cmd, 95 const struct drm_mode_fb_cmd2 *mode_cmd,
96 struct tegra_bo **planes, 96 struct tegra_bo **planes,
97 unsigned int num_planes) 97 unsigned int num_planes)
98{ 98{
@@ -131,7 +131,7 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
131 131
132struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, 132struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
133 struct drm_file *file, 133 struct drm_file *file,
134 struct drm_mode_fb_cmd2 *cmd) 134 const struct drm_mode_fb_cmd2 *cmd)
135{ 135{
136 unsigned int hsub, vsub, i; 136 unsigned int hsub, vsub, i;
137 struct tegra_bo *planes[4]; 137 struct tegra_bo *planes[4];
@@ -181,7 +181,7 @@ unreference:
181 return ERR_PTR(err); 181 return ERR_PTR(err);
182} 182}
183 183
184#ifdef CONFIG_DRM_TEGRA_FBDEV 184#ifdef CONFIG_DRM_FBDEV_EMULATION
185static struct fb_ops tegra_fb_ops = { 185static struct fb_ops tegra_fb_ops = {
186 .owner = THIS_MODULE, 186 .owner = THIS_MODULE,
187 .fb_fillrect = drm_fb_helper_sys_fillrect, 187 .fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -370,7 +370,7 @@ void tegra_fb_output_poll_changed(struct drm_device *drm)
370 370
371int tegra_drm_fb_prepare(struct drm_device *drm) 371int tegra_drm_fb_prepare(struct drm_device *drm)
372{ 372{
373#ifdef CONFIG_DRM_TEGRA_FBDEV 373#ifdef CONFIG_DRM_FBDEV_EMULATION
374 struct tegra_drm *tegra = drm->dev_private; 374 struct tegra_drm *tegra = drm->dev_private;
375 375
376 tegra->fbdev = tegra_fbdev_create(drm); 376 tegra->fbdev = tegra_fbdev_create(drm);
@@ -383,7 +383,7 @@ int tegra_drm_fb_prepare(struct drm_device *drm)
383 383
384void tegra_drm_fb_free(struct drm_device *drm) 384void tegra_drm_fb_free(struct drm_device *drm)
385{ 385{
386#ifdef CONFIG_DRM_TEGRA_FBDEV 386#ifdef CONFIG_DRM_FBDEV_EMULATION
387 struct tegra_drm *tegra = drm->dev_private; 387 struct tegra_drm *tegra = drm->dev_private;
388 388
389 tegra_fbdev_free(tegra->fbdev); 389 tegra_fbdev_free(tegra->fbdev);
@@ -392,7 +392,7 @@ void tegra_drm_fb_free(struct drm_device *drm)
392 392
393int tegra_drm_fb_init(struct drm_device *drm) 393int tegra_drm_fb_init(struct drm_device *drm)
394{ 394{
395#ifdef CONFIG_DRM_TEGRA_FBDEV 395#ifdef CONFIG_DRM_FBDEV_EMULATION
396 struct tegra_drm *tegra = drm->dev_private; 396 struct tegra_drm *tegra = drm->dev_private;
397 int err; 397 int err;
398 398
@@ -407,7 +407,7 @@ int tegra_drm_fb_init(struct drm_device *drm)
407 407
408void tegra_drm_fb_exit(struct drm_device *drm) 408void tegra_drm_fb_exit(struct drm_device *drm)
409{ 409{
410#ifdef CONFIG_DRM_TEGRA_FBDEV 410#ifdef CONFIG_DRM_FBDEV_EMULATION
411 struct tegra_drm *tegra = drm->dev_private; 411 struct tegra_drm *tegra = drm->dev_private;
412 412
413 tegra_fbdev_exit(tegra->fbdev); 413 tegra_fbdev_exit(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 52b32cbd9de6..b7ef4929e347 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1320,7 +1320,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
1320 hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; 1320 hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
1321 1321
1322 drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, 1322 drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
1323 DRM_MODE_ENCODER_TMDS); 1323 DRM_MODE_ENCODER_TMDS, NULL);
1324 drm_encoder_helper_add(&hdmi->output.encoder, 1324 drm_encoder_helper_add(&hdmi->output.encoder,
1325 &tegra_hdmi_encoder_helper_funcs); 1325 &tegra_hdmi_encoder_helper_funcs);
1326 1326
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index bc9735b4ad60..e246334e0252 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -287,7 +287,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
287 output->connector.dpms = DRM_MODE_DPMS_OFF; 287 output->connector.dpms = DRM_MODE_DPMS_OFF;
288 288
289 drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, 289 drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
290 DRM_MODE_ENCODER_LVDS); 290 DRM_MODE_ENCODER_LVDS, NULL);
291 drm_encoder_helper_add(&output->encoder, 291 drm_encoder_helper_add(&output->encoder,
292 &tegra_rgb_encoder_helper_funcs); 292 &tegra_rgb_encoder_helper_funcs);
293 293
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 3eff7cf75d25..3e012ee25242 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2178,7 +2178,7 @@ static int tegra_sor_init(struct host1x_client *client)
2178 sor->output.connector.dpms = DRM_MODE_DPMS_OFF; 2178 sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
2179 2179
2180 drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, 2180 drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
2181 encoder); 2181 encoder, NULL);
2182 drm_encoder_helper_add(&sor->output.encoder, helpers); 2182 drm_encoder_helper_add(&sor->output.encoder, helpers);
2183 2183
2184 drm_mode_connector_attach_encoder(&sor->output.connector, 2184 drm_mode_connector_attach_encoder(&sor->output.connector,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 876cad58b1f9..4ddb21e7f52f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -46,7 +46,7 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
46static struct of_device_id tilcdc_of_match[]; 46static struct of_device_id tilcdc_of_match[];
47 47
48static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev, 48static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
49 struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) 49 struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
50{ 50{
51 return drm_fb_cma_create(dev, file_priv, mode_cmd); 51 return drm_fb_cma_create(dev, file_priv, mode_cmd);
52} 52}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 0af8bed7ce1e..4dda6e2f464b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -128,7 +128,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
128 encoder->possible_crtcs = 1; 128 encoder->possible_crtcs = 1;
129 129
130 ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs, 130 ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
131 DRM_MODE_ENCODER_LVDS); 131 DRM_MODE_ENCODER_LVDS, NULL);
132 if (ret < 0) 132 if (ret < 0)
133 goto fail; 133 goto fail;
134 134
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 354c47ca6374..5052a8af7ecb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -138,7 +138,7 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
138 encoder->possible_crtcs = 1; 138 encoder->possible_crtcs = 1;
139 139
140 ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs, 140 ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
141 DRM_MODE_ENCODER_TMDS); 141 DRM_MODE_ENCODER_TMDS, NULL);
142 if (ret < 0) 142 if (ret < 0)
143 goto fail; 143 goto fail;
144 144
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 80adbac82bde..4a064efcea58 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -108,7 +108,7 @@ void udl_fbdev_unplug(struct drm_device *dev);
108struct drm_framebuffer * 108struct drm_framebuffer *
109udl_fb_user_fb_create(struct drm_device *dev, 109udl_fb_user_fb_create(struct drm_device *dev,
110 struct drm_file *file, 110 struct drm_file *file,
111 struct drm_mode_fb_cmd2 *mode_cmd); 111 const struct drm_mode_fb_cmd2 *mode_cmd);
112 112
113int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 113int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
114 const char *front, char **urb_buf_ptr, 114 const char *front, char **urb_buf_ptr,
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 4052c4656498..a181a647fcf9 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -73,7 +73,8 @@ struct drm_encoder *udl_encoder_init(struct drm_device *dev)
73 if (!encoder) 73 if (!encoder)
74 return NULL; 74 return NULL;
75 75
76 drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS); 76 drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS,
77 NULL);
77 drm_encoder_helper_add(encoder, &udl_helper_funcs); 78 drm_encoder_helper_add(encoder, &udl_helper_funcs);
78 encoder->possible_crtcs = 1; 79 encoder->possible_crtcs = 1;
79 return encoder; 80 return encoder;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 62c7b1dafaa4..200419d4d43c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -33,7 +33,6 @@ module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
33struct udl_fbdev { 33struct udl_fbdev {
34 struct drm_fb_helper helper; 34 struct drm_fb_helper helper;
35 struct udl_framebuffer ufb; 35 struct udl_framebuffer ufb;
36 struct list_head fbdev_list;
37 int fb_count; 36 int fb_count;
38}; 37};
39 38
@@ -456,7 +455,7 @@ static const struct drm_framebuffer_funcs udlfb_funcs = {
456static int 455static int
457udl_framebuffer_init(struct drm_device *dev, 456udl_framebuffer_init(struct drm_device *dev,
458 struct udl_framebuffer *ufb, 457 struct udl_framebuffer *ufb,
459 struct drm_mode_fb_cmd2 *mode_cmd, 458 const struct drm_mode_fb_cmd2 *mode_cmd,
460 struct udl_gem_object *obj) 459 struct udl_gem_object *obj)
461{ 460{
462 int ret; 461 int ret;
@@ -624,7 +623,7 @@ void udl_fbdev_unplug(struct drm_device *dev)
624struct drm_framebuffer * 623struct drm_framebuffer *
625udl_fb_user_fb_create(struct drm_device *dev, 624udl_fb_user_fb_create(struct drm_device *dev,
626 struct drm_file *file, 625 struct drm_file *file,
627 struct drm_mode_fb_cmd2 *mode_cmd) 626 const struct drm_mode_fb_cmd2 *mode_cmd)
628{ 627{
629 struct drm_gem_object *obj; 628 struct drm_gem_object *obj;
630 struct udl_framebuffer *ufb; 629 struct udl_framebuffer *ufb;
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 32b4f9cd8f52..4c6a99f0398c 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -8,10 +8,19 @@ vc4-y := \
8 vc4_crtc.o \ 8 vc4_crtc.o \
9 vc4_drv.o \ 9 vc4_drv.o \
10 vc4_kms.o \ 10 vc4_kms.o \
11 vc4_gem.o \
11 vc4_hdmi.o \ 12 vc4_hdmi.o \
12 vc4_hvs.o \ 13 vc4_hvs.o \
13 vc4_plane.o 14 vc4_irq.o \
15 vc4_plane.o \
16 vc4_render_cl.o \
17 vc4_trace_points.o \
18 vc4_v3d.o \
19 vc4_validate.o \
20 vc4_validate_shaders.o
14 21
15vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o 22vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
16 23
17obj-$(CONFIG_DRM_VC4) += vc4.o 24obj-$(CONFIG_DRM_VC4) += vc4.o
25
26CFLAGS_vc4_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ab9f5108ae1a..18dfe3ec9a62 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -12,19 +12,236 @@
12 * access to system memory with no MMU in between. To support it, we 12 * access to system memory with no MMU in between. To support it, we
13 * use the GEM CMA helper functions to allocate contiguous ranges of 13 * use the GEM CMA helper functions to allocate contiguous ranges of
14 * physical memory for our BOs. 14 * physical memory for our BOs.
15 *
16 * Since the CMA allocator is very slow, we keep a cache of recently
17 * freed BOs around so that the kernel's allocation of objects for 3D
18 * rendering can return quickly.
15 */ 19 */
16 20
17#include "vc4_drv.h" 21#include "vc4_drv.h"
22#include "uapi/drm/vc4_drm.h"
23
24static void vc4_bo_stats_dump(struct vc4_dev *vc4)
25{
26 DRM_INFO("num bos allocated: %d\n",
27 vc4->bo_stats.num_allocated);
28 DRM_INFO("size bos allocated: %dkb\n",
29 vc4->bo_stats.size_allocated / 1024);
30 DRM_INFO("num bos used: %d\n",
31 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
32 DRM_INFO("size bos used: %dkb\n",
33 (vc4->bo_stats.size_allocated -
34 vc4->bo_stats.size_cached) / 1024);
35 DRM_INFO("num bos cached: %d\n",
36 vc4->bo_stats.num_cached);
37 DRM_INFO("size bos cached: %dkb\n",
38 vc4->bo_stats.size_cached / 1024);
39}
40
41#ifdef CONFIG_DEBUG_FS
42int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
43{
44 struct drm_info_node *node = (struct drm_info_node *)m->private;
45 struct drm_device *dev = node->minor->dev;
46 struct vc4_dev *vc4 = to_vc4_dev(dev);
47 struct vc4_bo_stats stats;
48
49 /* Take a snapshot of the current stats with the lock held. */
50 mutex_lock(&vc4->bo_lock);
51 stats = vc4->bo_stats;
52 mutex_unlock(&vc4->bo_lock);
53
54 seq_printf(m, "num bos allocated: %d\n",
55 stats.num_allocated);
56 seq_printf(m, "size bos allocated: %dkb\n",
57 stats.size_allocated / 1024);
58 seq_printf(m, "num bos used: %d\n",
59 stats.num_allocated - stats.num_cached);
60 seq_printf(m, "size bos used: %dkb\n",
61 (stats.size_allocated - stats.size_cached) / 1024);
62 seq_printf(m, "num bos cached: %d\n",
63 stats.num_cached);
64 seq_printf(m, "size bos cached: %dkb\n",
65 stats.size_cached / 1024);
66
67 return 0;
68}
69#endif
70
71static uint32_t bo_page_index(size_t size)
72{
73 return (size / PAGE_SIZE) - 1;
74}
75
76/* Must be called with bo_lock held. */
77static void vc4_bo_destroy(struct vc4_bo *bo)
78{
79 struct drm_gem_object *obj = &bo->base.base;
80 struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
81
82 if (bo->validated_shader) {
83 kfree(bo->validated_shader->texture_samples);
84 kfree(bo->validated_shader);
85 bo->validated_shader = NULL;
86 }
87
88 vc4->bo_stats.num_allocated--;
89 vc4->bo_stats.size_allocated -= obj->size;
90 drm_gem_cma_free_object(obj);
91}
92
93/* Must be called with bo_lock held. */
94static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
95{
96 struct drm_gem_object *obj = &bo->base.base;
97 struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
98
99 vc4->bo_stats.num_cached--;
100 vc4->bo_stats.size_cached -= obj->size;
101
102 list_del(&bo->unref_head);
103 list_del(&bo->size_head);
104}
105
106static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
107 size_t size)
108{
109 struct vc4_dev *vc4 = to_vc4_dev(dev);
110 uint32_t page_index = bo_page_index(size);
111
112 if (vc4->bo_cache.size_list_size <= page_index) {
113 uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
114 page_index + 1);
115 struct list_head *new_list;
116 uint32_t i;
117
118 new_list = kmalloc_array(new_size, sizeof(struct list_head),
119 GFP_KERNEL);
120 if (!new_list)
121 return NULL;
122
123 /* Rebase the old cached BO lists to their new list
124 * head locations.
125 */
126 for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
127 struct list_head *old_list =
128 &vc4->bo_cache.size_list[i];
129
130 if (list_empty(old_list))
131 INIT_LIST_HEAD(&new_list[i]);
132 else
133 list_replace(old_list, &new_list[i]);
134 }
135 /* And initialize the brand new BO list heads. */
136 for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
137 INIT_LIST_HEAD(&new_list[i]);
138
139 kfree(vc4->bo_cache.size_list);
140 vc4->bo_cache.size_list = new_list;
141 vc4->bo_cache.size_list_size = new_size;
142 }
143
144 return &vc4->bo_cache.size_list[page_index];
145}
146
147void vc4_bo_cache_purge(struct drm_device *dev)
148{
149 struct vc4_dev *vc4 = to_vc4_dev(dev);
150
151 mutex_lock(&vc4->bo_lock);
152 while (!list_empty(&vc4->bo_cache.time_list)) {
153 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
154 struct vc4_bo, unref_head);
155 vc4_bo_remove_from_cache(bo);
156 vc4_bo_destroy(bo);
157 }
158 mutex_unlock(&vc4->bo_lock);
159}
160
161static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
162 uint32_t size)
163{
164 struct vc4_dev *vc4 = to_vc4_dev(dev);
165 uint32_t page_index = bo_page_index(size);
166 struct vc4_bo *bo = NULL;
167
168 size = roundup(size, PAGE_SIZE);
169
170 mutex_lock(&vc4->bo_lock);
171 if (page_index >= vc4->bo_cache.size_list_size)
172 goto out;
18 173
19struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size) 174 if (list_empty(&vc4->bo_cache.size_list[page_index]))
175 goto out;
176
177 bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
178 struct vc4_bo, size_head);
179 vc4_bo_remove_from_cache(bo);
180 kref_init(&bo->base.base.refcount);
181
182out:
183 mutex_unlock(&vc4->bo_lock);
184 return bo;
185}
186
187/**
188 * vc4_create_object - Implementation of driver->gem_create_object.
189 *
190 * This lets the CMA helpers allocate object structs for us, and keep
191 * our BO stats correct.
192 */
193struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
20{ 194{
195 struct vc4_dev *vc4 = to_vc4_dev(dev);
196 struct vc4_bo *bo;
197
198 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
199 if (!bo)
200 return ERR_PTR(-ENOMEM);
201
202 mutex_lock(&vc4->bo_lock);
203 vc4->bo_stats.num_allocated++;
204 vc4->bo_stats.size_allocated += size;
205 mutex_unlock(&vc4->bo_lock);
206
207 return &bo->base.base;
208}
209
210struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
211 bool from_cache)
212{
213 size_t size = roundup(unaligned_size, PAGE_SIZE);
214 struct vc4_dev *vc4 = to_vc4_dev(dev);
21 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
22 216
23 cma_obj = drm_gem_cma_create(dev, size); 217 if (size == 0)
24 if (IS_ERR(cma_obj))
25 return NULL; 218 return NULL;
26 else 219
27 return to_vc4_bo(&cma_obj->base); 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) {
222 struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);
223
224 if (bo)
225 return bo;
226 }
227
228 cma_obj = drm_gem_cma_create(dev, size);
229 if (IS_ERR(cma_obj)) {
230 /*
231 * If we've run out of CMA memory, kill the cache of
232		 * CMA allocations we've got lying around and try again.
233 */
234 vc4_bo_cache_purge(dev);
235
236 cma_obj = drm_gem_cma_create(dev, size);
237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4);
240 return NULL;
241 }
242 }
243
244 return to_vc4_bo(&cma_obj->base);
28} 245}
29 246
30int vc4_dumb_create(struct drm_file *file_priv, 247int vc4_dumb_create(struct drm_file *file_priv,
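
The cache introduced above keys freed BOs by size: bo_page_index() maps a page-aligned size to a slot, and size_list is an array of list heads grown on demand (at least doubling) so that cache lookups stay O(1). A runnable userspace sketch of just the bucketing and growth logic, with the list heads reduced to counters for brevity (the kernel version uses kmalloc_array() and list_replace()):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SIZE 4096UL

	static size_t *buckets;		/* stand-in for the size_list array */
	static unsigned nbuckets;

	static unsigned bo_page_index(size_t size)
	{
		return size / PAGE_SIZE - 1;
	}

	/* Grow the bucket array so that page_index fits, at least doubling. */
	static int ensure_bucket(unsigned page_index)
	{
		unsigned new_size;
		size_t *tmp;

		if (page_index < nbuckets)
			return 0;

		new_size = nbuckets * 2 > page_index + 1 ?
			   nbuckets * 2 : page_index + 1;
		tmp = realloc(buckets, new_size * sizeof(*buckets));
		if (!tmp)
			return -1;
		memset(tmp + nbuckets, 0, (new_size - nbuckets) * sizeof(*tmp));
		buckets = tmp;
		nbuckets = new_size;
		return 0;
	}

	int main(void)
	{
		size_t sizes[] = { PAGE_SIZE, 4 * PAGE_SIZE, 64 * PAGE_SIZE };
		unsigned i;

		for (i = 0; i < 3; i++) {
			unsigned idx = bo_page_index(sizes[i]);

			if (ensure_bucket(idx))
				return 1;
			buckets[idx]++;
			printf("size %zu -> bucket %u (array now %u slots)\n",
			       sizes[i], idx, nbuckets);
		}
		free(buckets);
		return 0;
	}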
@@ -41,7 +258,191 @@ int vc4_dumb_create(struct drm_file *file_priv,
41 if (args->size < args->pitch * args->height) 258 if (args->size < args->pitch * args->height)
42 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
43 260
44 bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE)); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo)
263 return -ENOMEM;
264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base);
267
268 return ret;
269}
270
271/* Must be called with bo_lock held. */
272static void vc4_bo_cache_free_old(struct drm_device *dev)
273{
274 struct vc4_dev *vc4 = to_vc4_dev(dev);
275 unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
276
277 while (!list_empty(&vc4->bo_cache.time_list)) {
278 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
279 struct vc4_bo, unref_head);
280 if (time_before(expire_time, bo->free_time)) {
281 mod_timer(&vc4->bo_cache.time_timer,
282 round_jiffies_up(jiffies +
283 msecs_to_jiffies(1000)));
284 return;
285 }
286
287 vc4_bo_remove_from_cache(bo);
288 vc4_bo_destroy(bo);
289 }
290}
291
292/* Called on the last userspace/kernel unreference of the BO. Returns
293 * it to the BO cache if possible, otherwise frees it.
294 *
295 * Note that this is called with the struct_mutex held.
296 */
297void vc4_free_object(struct drm_gem_object *gem_bo)
298{
299 struct drm_device *dev = gem_bo->dev;
300 struct vc4_dev *vc4 = to_vc4_dev(dev);
301 struct vc4_bo *bo = to_vc4_bo(gem_bo);
302 struct list_head *cache_list;
303
304 mutex_lock(&vc4->bo_lock);
305 /* If the object references someone else's memory, we can't cache it.
306 */
307 if (gem_bo->import_attach) {
308 vc4_bo_destroy(bo);
309 goto out;
310 }
311
312 /* Don't cache if it was publicly named. */
313 if (gem_bo->name) {
314 vc4_bo_destroy(bo);
315 goto out;
316 }
317
318 cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
319 if (!cache_list) {
320 vc4_bo_destroy(bo);
321 goto out;
322 }
323
324 if (bo->validated_shader) {
325 kfree(bo->validated_shader->texture_samples);
326 kfree(bo->validated_shader);
327 bo->validated_shader = NULL;
328 }
329
330 bo->free_time = jiffies;
331 list_add(&bo->size_head, cache_list);
332 list_add(&bo->unref_head, &vc4->bo_cache.time_list);
333
334 vc4->bo_stats.num_cached++;
335 vc4->bo_stats.size_cached += gem_bo->size;
336
337 vc4_bo_cache_free_old(dev);
338
339out:
340 mutex_unlock(&vc4->bo_lock);
341}
342
343static void vc4_bo_cache_time_work(struct work_struct *work)
344{
345 struct vc4_dev *vc4 =
346 container_of(work, struct vc4_dev, bo_cache.time_work);
347 struct drm_device *dev = vc4->dev;
348
349 mutex_lock(&vc4->bo_lock);
350 vc4_bo_cache_free_old(dev);
351 mutex_unlock(&vc4->bo_lock);
352}
353
354static void vc4_bo_cache_time_timer(unsigned long data)
355{
356 struct drm_device *dev = (struct drm_device *)data;
357 struct vc4_dev *vc4 = to_vc4_dev(dev);
358
359 schedule_work(&vc4->bo_cache.time_work);
360}
361
362struct dma_buf *
363vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
364{
365 struct vc4_bo *bo = to_vc4_bo(obj);
366
367 if (bo->validated_shader) {
368 DRM_ERROR("Attempting to export shader BO\n");
369 return ERR_PTR(-EINVAL);
370 }
371
372 return drm_gem_prime_export(dev, obj, flags);
373}
374
375int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
376{
377 struct drm_gem_object *gem_obj;
378 struct vc4_bo *bo;
379 int ret;
380
381 ret = drm_gem_mmap(filp, vma);
382 if (ret)
383 return ret;
384
385 gem_obj = vma->vm_private_data;
386 bo = to_vc4_bo(gem_obj);
387
388 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
389 DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
390 return -EINVAL;
391 }
392
393 /*
394 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
395 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
396 * the whole buffer.
397 */
398 vma->vm_flags &= ~VM_PFNMAP;
399 vma->vm_pgoff = 0;
400
401 ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
402 bo->base.vaddr, bo->base.paddr,
403 vma->vm_end - vma->vm_start);
404 if (ret)
405 drm_gem_vm_close(vma);
406
407 return ret;
408}
409
410int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
411{
412 struct vc4_bo *bo = to_vc4_bo(obj);
413
414 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
415 DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
416 return -EINVAL;
417 }
418
419 return drm_gem_cma_prime_mmap(obj, vma);
420}
421
422void *vc4_prime_vmap(struct drm_gem_object *obj)
423{
424 struct vc4_bo *bo = to_vc4_bo(obj);
425
426 if (bo->validated_shader) {
427 DRM_ERROR("mmaping of shader BOs not allowed.\n");
428 return ERR_PTR(-EINVAL);
429 }
430
431 return drm_gem_cma_prime_vmap(obj);
432}
433
434int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
435 struct drm_file *file_priv)
436{
437 struct drm_vc4_create_bo *args = data;
438 struct vc4_bo *bo = NULL;
439 int ret;
440
441 /*
442 * We can't allocate from the BO cache, because the BOs don't
443 * get zeroed, and that might leak data between users.
444 */
445 bo = vc4_bo_create(dev, args->size, false);
45 if (!bo) 446 if (!bo)
46 return -ENOMEM; 447 return -ENOMEM;
47 448
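
vc4_bo_cache_free_old() above walks the time-ordered list from the oldest entry, destroys anything freed more than a second ago, and re-arms the timer as soon as it meets a still-fresh BO. A runnable sketch of that expiry walk, with plain millisecond counters standing in for jiffies and an array standing in for the list:

	#include <stdio.h>

	#define EXPIRE_MS 1000

	struct entry { const char *name; long free_time; };

	/* Oldest first, matching the kernel's list_last_entry() walk. */
	static struct entry cache[] = {
		{ "bo-a", 100 },	/* freed at t=100ms */
		{ "bo-b", 900 },
		{ "bo-c", 1800 },
	};
	static int ncached = 3;

	static void cache_free_old(long now_ms)
	{
		while (ncached > 0) {
			struct entry *e = &cache[0];	/* oldest entry */
			int i;

			if (now_ms - e->free_time < EXPIRE_MS) {
				printf("%s still fresh; re-arm timer\n", e->name);
				return;
			}
			printf("destroying %s\n", e->name);
			/* pop the oldest: shift the rest down */
			for (i = 1; i < ncached; i++)
				cache[i - 1] = cache[i];
			ncached--;
		}
	}

	int main(void)
	{
		cache_free_old(2000);	/* frees bo-a and bo-b, keeps bo-c */
		return 0;
	}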
@@ -50,3 +451,107 @@ int vc4_dumb_create(struct drm_file *file_priv,
50 451
51 return ret; 452 return ret;
52} 453}
454
455int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
456 struct drm_file *file_priv)
457{
458 struct drm_vc4_mmap_bo *args = data;
459 struct drm_gem_object *gem_obj;
460
461 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
462 if (!gem_obj) {
463 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
464 return -EINVAL;
465 }
466
467 /* The mmap offset was set up at BO allocation time. */
468 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
469
470 drm_gem_object_unreference_unlocked(gem_obj);
471 return 0;
472}
473
474int
475vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
476 struct drm_file *file_priv)
477{
478 struct drm_vc4_create_shader_bo *args = data;
479 struct vc4_bo *bo = NULL;
480 int ret;
481
482 if (args->size == 0)
483 return -EINVAL;
484
485 if (args->size % sizeof(u64) != 0)
486 return -EINVAL;
487
488 if (args->flags != 0) {
489 DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
490 return -EINVAL;
491 }
492
493 if (args->pad != 0) {
494 DRM_INFO("Pad set: 0x%08x\n", args->pad);
495 return -EINVAL;
496 }
497
498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo)
500 return -ENOMEM;
501
502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data,
504 args->size);
505 if (ret != 0)
506 goto fail;
507	/* Clear the rest of the memory, which may hold stale data left
508	 * over by a previous user of this cached BO.
509	 */
510 memset(bo->base.vaddr + args->size, 0,
511 bo->base.base.size - args->size);
512
513 bo->validated_shader = vc4_validate_shader(&bo->base);
514 if (!bo->validated_shader) {
515 ret = -EINVAL;
516 goto fail;
517 }
518
519 /* We have to create the handle after validation, to avoid
520	 * races with users doing things like mmapping the shader BO.
521 */
522 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
523
524 fail:
525 drm_gem_object_unreference_unlocked(&bo->base.base);
526
527 return ret;
528}
529
530void vc4_bo_cache_init(struct drm_device *dev)
531{
532 struct vc4_dev *vc4 = to_vc4_dev(dev);
533
534 mutex_init(&vc4->bo_lock);
535
536 INIT_LIST_HEAD(&vc4->bo_cache.time_list);
537
538 INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
539 setup_timer(&vc4->bo_cache.time_timer,
540 vc4_bo_cache_time_timer,
541 (unsigned long)dev);
542}
543
544void vc4_bo_cache_destroy(struct drm_device *dev)
545{
546 struct vc4_dev *vc4 = to_vc4_dev(dev);
547
548 del_timer(&vc4->bo_cache.time_timer);
549 cancel_work_sync(&vc4->bo_cache.time_work);
550
551 vc4_bo_cache_purge(dev);
552
553 if (vc4->bo_stats.num_allocated) {
554 DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
555 vc4_bo_stats_dump(vc4);
556 }
557}
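
Two of the ioctls above make opposite choices about the cache for the same reason: a recycled BO still contains the previous user's data. vc4_create_bo_ioctl() skips the cache so userspace always gets zeroed memory, while vc4_create_shader_bo_ioctl() may recycle because it overwrites the buffer anyway, copying the shader in and then zeroing the tail. A runnable sketch of that fill-then-zero step:

	#include <stdio.h>
	#include <string.h>

	#define BO_SIZE 16

	int main(void)
	{
		/* Pretend this buffer came back from the cache full of stale data. */
		char bo[BO_SIZE];
		const char shader[] = { 1, 2, 3, 4, 5, 6 };	/* "user" payload */
		size_t used = sizeof(shader);
		size_t i;

		memset(bo, 0xAA, sizeof(bo));

		memcpy(bo, shader, used);			/* copy_from_user() analogue */
		memset(bo + used, 0, sizeof(bo) - used);	/* zero the stale tail */

		for (i = 0; i < sizeof(bo); i++)
			printf("%02x ", (unsigned char)bo[i]);
		printf("\n");
		return 0;
	}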
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7a9f4768591e..8d0d70e51ef2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
35#include "drm_atomic_helper.h" 35#include "drm_atomic_helper.h"
36#include "drm_crtc_helper.h" 36#include "drm_crtc_helper.h"
37#include "linux/clk.h" 37#include "linux/clk.h"
38#include "drm_fb_cma_helper.h"
38#include "linux/component.h" 39#include "linux/component.h"
39#include "linux/of_device.h" 40#include "linux/of_device.h"
40#include "vc4_drv.h" 41#include "vc4_drv.h"
@@ -168,7 +169,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
168 struct drm_connector *connector; 169 struct drm_connector *connector;
169 170
170 drm_for_each_connector(connector, crtc->dev) { 171 drm_for_each_connector(connector, crtc->dev) {
171 if (connector && connector->state->crtc == crtc) { 172 if (connector->state->crtc == crtc) {
172 struct drm_encoder *encoder = connector->encoder; 173 struct drm_encoder *encoder = connector->encoder;
173 struct vc4_encoder *vc4_encoder = 174 struct vc4_encoder *vc4_encoder =
174 to_vc4_encoder(encoder); 175 to_vc4_encoder(encoder);
@@ -401,7 +402,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
401 dlist_next++; 402 dlist_next++;
402 403
403 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 404 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
404 (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist); 405 (u32 __iomem *)vc4_crtc->dlist -
406 (u32 __iomem *)vc4->hvs->dlist);
405 407
406 /* Make the next display list start after ours. */ 408 /* Make the next display list start after ours. */
407 vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); 409 vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -475,10 +477,106 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
475 return ret; 477 return ret;
476} 478}
477 479
480struct vc4_async_flip_state {
481 struct drm_crtc *crtc;
482 struct drm_framebuffer *fb;
483 struct drm_pending_vblank_event *event;
484
485 struct vc4_seqno_cb cb;
486};
487
488/* Called when the V3D execution for the BO being flipped to is done, so that
489 * we can actually update the plane's address to point to it.
490 */
491static void
492vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
493{
494 struct vc4_async_flip_state *flip_state =
495 container_of(cb, struct vc4_async_flip_state, cb);
496 struct drm_crtc *crtc = flip_state->crtc;
497 struct drm_device *dev = crtc->dev;
498 struct vc4_dev *vc4 = to_vc4_dev(dev);
499 struct drm_plane *plane = crtc->primary;
500
501 vc4_plane_async_set_fb(plane, flip_state->fb);
502 if (flip_state->event) {
503 unsigned long flags;
504
505 spin_lock_irqsave(&dev->event_lock, flags);
506 drm_crtc_send_vblank_event(crtc, flip_state->event);
507 spin_unlock_irqrestore(&dev->event_lock, flags);
508 }
509
510 drm_framebuffer_unreference(flip_state->fb);
511 kfree(flip_state);
512
513 up(&vc4->async_modeset);
514}
515
516/* Implements async (non-vblank-synced) page flips.
517 *
518 * The page flip ioctl needs to return immediately, so we grab the
519 * modeset semaphore on the pipe, and queue the address update for
520 * when V3D is done with the BO being flipped to.
521 */
522static int vc4_async_page_flip(struct drm_crtc *crtc,
523 struct drm_framebuffer *fb,
524 struct drm_pending_vblank_event *event,
525 uint32_t flags)
526{
527 struct drm_device *dev = crtc->dev;
528 struct vc4_dev *vc4 = to_vc4_dev(dev);
529 struct drm_plane *plane = crtc->primary;
530 int ret = 0;
531 struct vc4_async_flip_state *flip_state;
532 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
533 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
534
535 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
536 if (!flip_state)
537 return -ENOMEM;
538
539 drm_framebuffer_reference(fb);
540 flip_state->fb = fb;
541 flip_state->crtc = crtc;
542 flip_state->event = event;
543
544	/* Make sure all other async modesets have landed. */
545 ret = down_interruptible(&vc4->async_modeset);
546 if (ret) {
547 kfree(flip_state);
548 return ret;
549 }
550
551 /* Immediately update the plane's legacy fb pointer, so that later
552 * modeset prep sees the state that will be present when the semaphore
553 * is released.
554 */
555 drm_atomic_set_fb_for_plane(plane->state, fb);
556 plane->fb = fb;
557
558 vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
559 vc4_async_page_flip_complete);
560
561 /* Driver takes ownership of state on successful async commit. */
562 return 0;
563}
564
565static int vc4_page_flip(struct drm_crtc *crtc,
566 struct drm_framebuffer *fb,
567 struct drm_pending_vblank_event *event,
568 uint32_t flags)
569{
570 if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
571 return vc4_async_page_flip(crtc, fb, event, flags);
572 else
573 return drm_atomic_helper_page_flip(crtc, fb, event, flags);
574}
575
478static const struct drm_crtc_funcs vc4_crtc_funcs = { 576static const struct drm_crtc_funcs vc4_crtc_funcs = {
479 .set_config = drm_atomic_helper_set_config, 577 .set_config = drm_atomic_helper_set_config,
480 .destroy = vc4_crtc_destroy, 578 .destroy = vc4_crtc_destroy,
481 .page_flip = drm_atomic_helper_page_flip, 579 .page_flip = vc4_page_flip,
482 .set_property = NULL, 580 .set_property = NULL,
483 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 581 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
484 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 582 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
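
The async flip path above cannot update the plane address until V3D has finished reading the old buffer, so it registers a callback keyed on the BO's seqno and completes the flip from there, recovering its state with container_of(). A compilable sketch of the embedded-callback pattern (seqno_cb, flip_state, and seqno_retire are illustrative names, not the vc4 API):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Minimal analogue of vc4_seqno_cb: an embedded callback node. */
	struct seqno_cb {
		void (*func)(struct seqno_cb *cb);
		unsigned long seqno;
	};

	struct flip_state {
		const char *fb_name;
		struct seqno_cb cb;	/* embedded, recovered via container_of */
	};

	static void flip_complete(struct seqno_cb *cb)
	{
		struct flip_state *state = container_of(cb, struct flip_state, cb);

		printf("flipping to %s now that seqno %lu retired\n",
		       state->fb_name, cb->seqno);
	}

	/* Stand-in for the GPU retiring a job and running queued callbacks. */
	static void seqno_retire(struct seqno_cb *cb, unsigned long done)
	{
		if (done >= cb->seqno)
			cb->func(cb);
	}

	int main(void)
	{
		struct flip_state state = { .fb_name = "new-fb" };

		state.cb.func = flip_complete;
		state.cb.seqno = 42;	/* BO is busy until job 42 completes */
		seqno_retire(&state.cb, 42);
		return 0;
	}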
@@ -591,21 +689,21 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
591 * that will take too much. 689 * that will take too much.
592 */ 690 */
593 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); 691 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
594 if (!primary_plane) { 692 if (IS_ERR(primary_plane)) {
595 dev_err(dev, "failed to construct primary plane\n"); 693 dev_err(dev, "failed to construct primary plane\n");
596 ret = PTR_ERR(primary_plane); 694 ret = PTR_ERR(primary_plane);
597 goto err; 695 goto err;
598 } 696 }
599 697
600 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); 698 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
601 if (!cursor_plane) { 699 if (IS_ERR(cursor_plane)) {
602 dev_err(dev, "failed to construct cursor plane\n"); 700 dev_err(dev, "failed to construct cursor plane\n");
603 ret = PTR_ERR(cursor_plane); 701 ret = PTR_ERR(cursor_plane);
604 goto err_primary; 702 goto err_primary;
605 } 703 }
606 704
607 drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane, 705 drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
608 &vc4_crtc_funcs); 706 &vc4_crtc_funcs, NULL);
609 drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs); 707 drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
610 primary_plane->crtc = crtc; 708 primary_plane->crtc = crtc;
611 cursor_plane->crtc = crtc; 709 cursor_plane->crtc = crtc;
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4297b0a5b74e..d76ad10b07fd 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -16,11 +16,14 @@
16#include "vc4_regs.h" 16#include "vc4_regs.h"
17 17
18static const struct drm_info_list vc4_debugfs_list[] = { 18static const struct drm_info_list vc4_debugfs_list[] = {
19 {"bo_stats", vc4_bo_stats_debugfs, 0},
19 {"hdmi_regs", vc4_hdmi_debugfs_regs, 0}, 20 {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
20 {"hvs_regs", vc4_hvs_debugfs_regs, 0}, 21 {"hvs_regs", vc4_hvs_debugfs_regs, 0},
21 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, 22 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
22 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, 23 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
23 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, 24 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
25 {"v3d_ident", vc4_v3d_debugfs_ident, 0},
26 {"v3d_regs", vc4_v3d_debugfs_regs, 0},
24}; 27};
25 28
26#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list) 29#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6e730605edcc..cbcbbb83500e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include "drm_fb_cma_helper.h"
 
+#include "uapi/drm/vc4_drm.h"
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
@@ -63,7 +64,7 @@ static const struct file_operations vc4_drm_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_gem_cma_mmap,
+	.mmap = vc4_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 #ifdef CONFIG_COMPAT
@@ -73,16 +74,30 @@ static const struct file_operations vc4_drm_fops = {
 };
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
+			  DRM_ROOT_ONLY),
 };
 
 static struct drm_driver vc4_drm_driver = {
 	.driver_features = (DRIVER_MODESET |
 			    DRIVER_ATOMIC |
 			    DRIVER_GEM |
+			    DRIVER_HAVE_IRQ |
 			    DRIVER_PRIME),
 	.lastclose = vc4_lastclose,
 	.preclose = vc4_drm_preclose,
 
+	.irq_handler = vc4_irq,
+	.irq_preinstall = vc4_irq_preinstall,
+	.irq_postinstall = vc4_irq_postinstall,
+	.irq_uninstall = vc4_irq_uninstall,
+
 	.enable_vblank = vc4_enable_vblank,
 	.disable_vblank = vc4_disable_vblank,
 	.get_vblank_counter = drm_vblank_count,
@@ -92,18 +107,19 @@ static struct drm_driver vc4_drm_driver = {
 	.debugfs_cleanup = vc4_debugfs_cleanup,
 #endif
 
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_create_object = vc4_create_object,
+	.gem_free_object = vc4_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import = drm_gem_prime_import,
-	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_export = vc4_prime_export,
 	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
-	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vmap = vc4_prime_vmap,
 	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
-	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+	.gem_prime_mmap = vc4_prime_mmap,
 
 	.dumb_create = vc4_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -170,13 +186,17 @@ static int vc4_drm_bind(struct device *dev)
 
 	drm_dev_set_unique(drm, dev_name(dev));
 
+	vc4_bo_cache_init(drm);
+
 	drm_mode_config_init(drm);
 	if (ret)
 		goto unref;
 
+	vc4_gem_init(drm);
+
 	ret = component_bind_all(dev, drm);
 	if (ret)
-		goto unref;
+		goto gem_destroy;
 
 	ret = drm_dev_register(drm, 0);
 	if (ret < 0)
@@ -200,8 +220,11 @@ unregister:
 	drm_dev_unregister(drm);
 unbind_all:
 	component_unbind_all(dev, drm);
+gem_destroy:
+	vc4_gem_destroy(drm);
unref:
 	drm_dev_unref(drm);
+	vc4_bo_cache_destroy(drm);
 	return ret;
 }
 
@@ -228,6 +251,7 @@ static struct platform_driver *const component_drivers[] = {
 	&vc4_hdmi_driver,
 	&vc4_crtc_driver,
 	&vc4_hvs_driver,
+	&vc4_v3d_driver,
 };
 
 static int vc4_platform_drm_probe(struct platform_device *pdev)
@@ -259,7 +283,6 @@ static struct platform_driver vc4_platform_driver = {
 	.remove = vc4_platform_drm_remove,
 	.driver = {
 		.name = "vc4-drm",
-		.owner = THIS_MODULE,
 		.of_match_table = vc4_of_match,
 	},
 };
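
The ioctl table above is the new userspace entry point for 3D: submit a command list, then wait either on the returned seqno or on a specific BO. A minimal userspace sketch of the wait-on-BO path, assuming the struct layout and ioctl numbering that the new uapi/drm/vc4_drm.h header (not shown in this hunk) is expected to provide; the 0x02 slot simply matches VC4_WAIT_BO's position in the table under sequential numbering from 0:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Assumed to mirror uapi/drm/vc4_drm.h; both the struct layout and the
 * ioctl number are inferred, not quoted from this patch.
 */
struct drm_vc4_wait_bo {
	uint32_t handle;
	uint32_t pad;
	uint64_t timeout_ns;
};
#define DRM_VC4_WAIT_BO 0x02
#define DRM_IOCTL_VC4_WAIT_BO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)

/* Block until all rendering to bo_handle has completed. */
static int vc4_wait_bo_idle(int drm_fd, uint32_t bo_handle)
{
	struct drm_vc4_wait_bo wait = {
		.handle = bo_handle,
		.timeout_ns = ~0ull,	/* infinite wait */
	};

	return ioctl(drm_fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
}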
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fd8319fa682e..080865ec2bae 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -15,8 +15,89 @@ struct vc4_dev {
 	struct vc4_hdmi *hdmi;
 	struct vc4_hvs *hvs;
 	struct vc4_crtc *crtc[3];
+	struct vc4_v3d *v3d;
 
 	struct drm_fbdev_cma *fbdev;
+
+	struct vc4_hang_state *hang_state;
+
+	/* The kernel-space BO cache.  Tracks buffers that have been
+	 * unreferenced by all other users (refcounts of 0!) but not
+	 * yet freed, so we can do cheap allocations.
+	 */
+	struct vc4_bo_cache {
+		/* Array of list heads for entries in the BO cache,
+		 * based on number of pages, so we can do O(1) lookups
+		 * in the cache when allocating.
+		 */
+		struct list_head *size_list;
+		uint32_t size_list_size;
+
+		/* List of all BOs in the cache, ordered by age, so we
+		 * can do O(1) lookups when trying to free old
+		 * buffers.
+		 */
+		struct list_head time_list;
+		struct work_struct time_work;
+		struct timer_list time_timer;
+	} bo_cache;
+
+	struct vc4_bo_stats {
+		u32 num_allocated;
+		u32 size_allocated;
+		u32 num_cached;
+		u32 size_cached;
+	} bo_stats;
+
+	/* Protects bo_cache and the BO stats. */
+	struct mutex bo_lock;
+
+	/* Sequence number for the last job queued in job_list.
+	 * Starts at 0 (no jobs emitted).
+	 */
+	uint64_t emit_seqno;
+
+	/* Sequence number for the last completed job on the GPU.
+	 * Starts at 0 (no jobs completed).
+	 */
+	uint64_t finished_seqno;
+
+	/* List of all struct vc4_exec_info for jobs to be executed.
+	 * The first job in the list is the one currently programmed
+	 * into ct0ca/ct1ca for execution.
+	 */
+	struct list_head job_list;
+	/* List of the finished vc4_exec_infos waiting to be freed by
+	 * job_done_work.
+	 */
+	struct list_head job_done_list;
+	/* Spinlock used to synchronize the job_list and seqno
+	 * accesses between the IRQ handler and GEM ioctls.
+	 */
+	spinlock_t job_lock;
+	wait_queue_head_t job_wait_queue;
+	struct work_struct job_done_work;
+
+	/* List of struct vc4_seqno_cb for callbacks to be made from a
+	 * workqueue when the given seqno is passed.
+	 */
+	struct list_head seqno_cb_list;
+
+	/* The binner overflow memory that's currently set up in
+	 * BPOA/BPOS registers.  When overflow occurs and a new one is
+	 * allocated, the previous one will be moved to
+	 * vc4->current_exec's free list.
+	 */
+	struct vc4_bo *overflow_mem;
+	struct work_struct overflow_mem_work;
+
+	struct {
+		uint32_t last_ct0ca, last_ct1ca;
+		struct timer_list timer;
+		struct work_struct reset_work;
+	} hangcheck;
+
+	struct semaphore async_modeset;
 };
 
 static inline struct vc4_dev *
@@ -27,6 +108,25 @@ to_vc4_dev(struct drm_device *dev)
 
 struct vc4_bo {
 	struct drm_gem_cma_object base;
+
+	/* seqno of the last job to render to this BO. */
+	uint64_t seqno;
+
+	/* List entry for the BO's position in either
+	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
+	 */
+	struct list_head unref_head;
+
+	/* Time in jiffies when the BO was put in vc4->bo_cache. */
+	unsigned long free_time;
+
+	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
+	struct list_head size_head;
+
+	/* Struct for shader validation state, if created by
+	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
+	 */
+	struct vc4_validated_shader_info *validated_shader;
 };
 
 static inline struct vc4_bo *
@@ -35,6 +135,17 @@ to_vc4_bo(struct drm_gem_object *bo)
 	return (struct vc4_bo *)bo;
 }
 
+struct vc4_seqno_cb {
+	struct work_struct work;
+	uint64_t seqno;
+	void (*func)(struct vc4_seqno_cb *cb);
+};
+
+struct vc4_v3d {
+	struct platform_device *pdev;
+	void __iomem *regs;
+};
+
 struct vc4_hvs {
 	struct platform_device *pdev;
 	void __iomem *regs;
@@ -72,9 +183,142 @@ to_vc4_encoder(struct drm_encoder *encoder)
 	return container_of(encoder, struct vc4_encoder, base);
 }
 
+#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
+#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
 #define HVS_READ(offset) readl(vc4->hvs->regs + offset)
 #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
 
+struct vc4_exec_info {
+	/* Sequence number for this bin/render job. */
+	uint64_t seqno;
+
+	/* Kernel-space copy of the ioctl arguments */
+	struct drm_vc4_submit_cl *args;
+
+	/* This is the array of BOs that were looked up at the start of exec.
+	 * Command validation will use indices into this array.
+	 */
+	struct drm_gem_cma_object **bo;
+	uint32_t bo_count;
+
+	/* Pointers for our position in vc4->job_list */
+	struct list_head head;
+
+	/* List of other BOs used in the job that need to be released
+	 * once the job is complete.
+	 */
+	struct list_head unref_list;
+
+	/* Current unvalidated indices into @bo loaded by the non-hardware
+	 * VC4_PACKET_GEM_HANDLES.
+	 */
+	uint32_t bo_index[2];
+
+	/* This is the BO where we store the validated command lists, shader
+	 * records, and uniforms.
+	 */
+	struct drm_gem_cma_object *exec_bo;
+
+	/**
+	 * This tracks the per-shader-record state (packet 64) that
+	 * determines the length of the shader record and the offset
+	 * it's expected to be found at.  It gets read in from the
+	 * command lists.
+	 */
+	struct vc4_shader_state {
+		uint32_t addr;
+		/* Maximum vertex index referenced by any primitive using this
+		 * shader state.
+		 */
+		uint32_t max_index;
+	} *shader_state;
+
+	/** How many shader states the user declared they were using. */
+	uint32_t shader_state_size;
+	/** How many shader state records the validator has seen. */
+	uint32_t shader_state_count;
+
+	bool found_tile_binning_mode_config_packet;
+	bool found_start_tile_binning_packet;
+	bool found_increment_semaphore_packet;
+	bool found_flush;
+	uint8_t bin_tiles_x, bin_tiles_y;
+	struct drm_gem_cma_object *tile_bo;
+	uint32_t tile_alloc_offset;
+
+	/**
+	 * Computed addresses pointing into exec_bo where we start the
+	 * bin thread (ct0) and render thread (ct1).
+	 */
+	uint32_t ct0ca, ct0ea;
+	uint32_t ct1ca, ct1ea;
+
+	/* Pointer to the unvalidated bin CL (if present). */
+	void *bin_u;
+
+	/* Pointers to the shader recs.  The paddr gets incremented as CL
+	 * packets are relocated in validate_gl_shader_state, and the vaddrs
+	 * (u and v) get incremented and the size decremented as the shader
+	 * recs themselves are validated.
+	 */
+	void *shader_rec_u;
+	void *shader_rec_v;
+	uint32_t shader_rec_p;
+	uint32_t shader_rec_size;
+
+	/* Pointers to the uniform data.  These pointers are incremented, and
+	 * size decremented, as each batch of uniforms is uploaded.
+	 */
+	void *uniforms_u;
+	void *uniforms_v;
+	uint32_t uniforms_p;
+	uint32_t uniforms_size;
+};
+
+static inline struct vc4_exec_info *
+vc4_first_job(struct vc4_dev *vc4)
+{
+	if (list_empty(&vc4->job_list))
+		return NULL;
+	return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+}
+
+/**
+ * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
+ * setup parameters.
+ *
+ * This will be used at draw time to relocate the reference to the texture
+ * contents in p0, and validate that the offset combined with
+ * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
+ * Note that the hardware treats unprovided config parameters as 0, so not all
+ * of them need to be set up for every texture sample, and we'll store ~0 as
+ * the offset to mark the unused ones.
+ *
+ * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
+ * Setup") for definitions of the texture parameters.
+ */
+struct vc4_texture_sample_info {
+	bool is_direct;
+	uint32_t p_offset[4];
+};
+
+/**
+ * struct vc4_validated_shader_info - information about validated shaders that
+ * needs to be used from command list validation.
+ *
+ * For a given shader, each time a shader state record references it, we need
+ * to verify that the shader doesn't read more uniforms than the shader state
+ * record's uniform BO pointer can provide, and we need to apply relocations
+ * and validate the shader state record's uniforms that define the texture
+ * samples.
+ */
+struct vc4_validated_shader_info {
+	uint32_t uniforms_size;
+	uint32_t uniforms_src_size;
+	uint32_t num_texture_samples;
+	struct vc4_texture_sample_info *texture_samples;
+};
+
 /**
  * _wait_for - magic (register) wait macro
  *
@@ -104,13 +348,29 @@ to_vc4_encoder(struct drm_encoder *encoder)
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 
 /* vc4_bo.c */
+struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
 void vc4_free_object(struct drm_gem_object *gem_obj);
-struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size);
+struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
+			     bool from_cache);
 int vc4_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args);
 struct dma_buf *vc4_prime_export(struct drm_device *dev,
 				 struct drm_gem_object *obj, int flags);
+int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
+int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+void *vc4_prime_vmap(struct drm_gem_object *obj);
+void vc4_bo_cache_init(struct drm_device *dev);
+void vc4_bo_cache_destroy(struct drm_device *dev);
+int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
 
 /* vc4_crtc.c */
 extern struct platform_driver vc4_crtc_driver;
@@ -126,10 +386,34 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
 /* vc4_drv.c */
 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
 
+/* vc4_gem.c */
+void vc4_gem_init(struct drm_device *dev);
+void vc4_gem_destroy(struct drm_device *dev);
+int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+void vc4_submit_next_job(struct drm_device *dev);
+int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
+		       uint64_t timeout_ns, bool interruptible);
+void vc4_job_handle_completed(struct vc4_dev *vc4);
+int vc4_queue_seqno_cb(struct drm_device *dev,
+		       struct vc4_seqno_cb *cb, uint64_t seqno,
+		       void (*func)(struct vc4_seqno_cb *cb));
+
 /* vc4_hdmi.c */
 extern struct platform_driver vc4_hdmi_driver;
 int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
 
+/* vc4_irq.c */
+irqreturn_t vc4_irq(int irq, void *arg);
+void vc4_irq_preinstall(struct drm_device *dev);
+int vc4_irq_postinstall(struct drm_device *dev);
+void vc4_irq_uninstall(struct drm_device *dev);
+void vc4_irq_reset(struct drm_device *dev);
+
 /* vc4_hvs.c */
 extern struct platform_driver vc4_hvs_driver;
 void vc4_hvs_dump_state(struct drm_device *dev);
@@ -143,3 +427,35 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 			     enum drm_plane_type type);
 u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
 u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+void vc4_plane_async_set_fb(struct drm_plane *plane,
+			    struct drm_framebuffer *fb);
+
+/* vc4_v3d.c */
+extern struct platform_driver vc4_v3d_driver;
+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
+
+/* vc4_validate.c */
+int
+vc4_validate_bin_cl(struct drm_device *dev,
+		    void *validated,
+		    void *unvalidated,
+		    struct vc4_exec_info *exec);
+
+int
+vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
+
+struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+				      uint32_t hindex);
+
+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
+
+bool vc4_check_tex_size(struct vc4_exec_info *exec,
+			struct drm_gem_cma_object *fbo,
+			uint32_t offset, uint8_t tiling_format,
+			uint32_t width, uint32_t height, uint8_t cpp);
+
+/* vc4_validate_shader.c */
+struct vc4_validated_shader_info *
+vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
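
The header now splits GPU work into per-job vc4_exec_info records tracked by two monotonic counters: emit_seqno advances when a job is queued, finished_seqno when the frame-done IRQ retires it, and everything from ioctl waits to page flips hangs off that pair. The vc4_seqno_cb hook is the deferred flavor; a kernel-style sketch of how a consumer would use it (the my_flip wrapper is illustrative, not part of the patch):

/* Illustrative consumer of vc4_queue_seqno_cb(). */
struct my_flip {
	struct vc4_seqno_cb cb;	/* must stay alive until the callback runs */
	struct drm_framebuffer *fb;
};

static void my_flip_done(struct vc4_seqno_cb *cb)
{
	struct my_flip *flip = container_of(cb, struct my_flip, cb);

	/* Called from a workqueue once finished_seqno >= cb->seqno. */
	drm_framebuffer_unreference(flip->fb);
	kfree(flip);
}

static int my_queue_flip(struct drm_device *dev, struct my_flip *flip,
			 uint64_t render_seqno)
{
	/* Fires immediately if the seqno has already passed; otherwise
	 * the callback sits on vc4->seqno_cb_list until
	 * vc4_job_handle_completed() schedules it.
	 */
	return vc4_queue_seqno_cb(dev, &flip->cb, render_seqno, my_flip_done);
}

vc4_kms.c below uses exactly this shape for asynchronous atomic commits.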
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
new file mode 100644
index 000000000000..39f29e759334
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -0,0 +1,867 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+#include "vc4_trace.h"
+
+static void
+vc4_queue_hangcheck(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	mod_timer(&vc4->hangcheck.timer,
+		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
+}
+
+struct vc4_hang_state {
+	struct drm_vc4_get_hang_state user_state;
+
+	u32 bo_count;
+	struct drm_gem_object **bo;
+};
+
+static void
+vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
+{
+	unsigned int i;
+
+	mutex_lock(&dev->struct_mutex);
+	for (i = 0; i < state->user_state.bo_count; i++)
+		drm_gem_object_unreference(state->bo[i]);
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(state);
+}
+
+int
+vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_vc4_get_hang_state *get_state = data;
+	struct drm_vc4_get_hang_state_bo *bo_state;
+	struct vc4_hang_state *kernel_state;
+	struct drm_vc4_get_hang_state *state;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	unsigned long irqflags;
+	u32 i;
+	int ret;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	kernel_state = vc4->hang_state;
+	if (!kernel_state) {
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		return -ENOENT;
+	}
+	state = &kernel_state->user_state;
+
+	/* If the user's array isn't big enough, just return the
+	 * required array size.
+	 */
+	if (get_state->bo_count < state->bo_count) {
+		get_state->bo_count = state->bo_count;
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		return 0;
+	}
+
+	vc4->hang_state = NULL;
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
+	state->bo = get_state->bo;
+	memcpy(get_state, state, sizeof(*state));
+
+	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
+	if (!bo_state) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
+	for (i = 0; i < state->bo_count; i++) {
+		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
+		u32 handle;
+
+		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
+					    &handle);
+
+		if (ret) {
+			state->bo_count = i - 1;
+			goto err;
+		}
+		bo_state[i].handle = handle;
+		bo_state[i].paddr = vc4_bo->base.paddr;
+		bo_state[i].size = vc4_bo->base.base.size;
+	}
+
+	ret = copy_to_user((void __user *)(uintptr_t)get_state->bo,
+			   bo_state,
+			   state->bo_count * sizeof(*bo_state));
+	kfree(bo_state);
+
+err_free:
+
+	vc4_free_hang_state(dev, kernel_state);
+
+err:
+	return ret;
+}
+
+static void
+vc4_save_hang_state(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct drm_vc4_get_hang_state *state;
+	struct vc4_hang_state *kernel_state;
+	struct vc4_exec_info *exec;
+	struct vc4_bo *bo;
+	unsigned long irqflags;
+	unsigned int i, unref_list_count;
+
+	kernel_state = kcalloc(1, sizeof(*state), GFP_KERNEL);
+	if (!kernel_state)
+		return;
+
+	state = &kernel_state->user_state;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	exec = vc4_first_job(vc4);
+	if (!exec) {
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		return;
+	}
+
+	unref_list_count = 0;
+	list_for_each_entry(bo, &exec->unref_list, unref_head)
+		unref_list_count++;
+
+	state->bo_count = exec->bo_count + unref_list_count;
+	kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
+				   GFP_ATOMIC);
+	if (!kernel_state->bo) {
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		return;
+	}
+
+	for (i = 0; i < exec->bo_count; i++) {
+		drm_gem_object_reference(&exec->bo[i]->base);
+		kernel_state->bo[i] = &exec->bo[i]->base;
+	}
+
+	list_for_each_entry(bo, &exec->unref_list, unref_head) {
+		drm_gem_object_reference(&bo->base.base);
+		kernel_state->bo[i] = &bo->base.base;
+		i++;
+	}
+
+	state->start_bin = exec->ct0ca;
+	state->start_render = exec->ct1ca;
+
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+	state->ct0ca = V3D_READ(V3D_CTNCA(0));
+	state->ct0ea = V3D_READ(V3D_CTNEA(0));
+
+	state->ct1ca = V3D_READ(V3D_CTNCA(1));
+	state->ct1ea = V3D_READ(V3D_CTNEA(1));
+
+	state->ct0cs = V3D_READ(V3D_CTNCS(0));
+	state->ct1cs = V3D_READ(V3D_CTNCS(1));
+
+	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
+	state->ct1ra0 = V3D_READ(V3D_CT01RA0);
+
+	state->bpca = V3D_READ(V3D_BPCA);
+	state->bpcs = V3D_READ(V3D_BPCS);
+	state->bpoa = V3D_READ(V3D_BPOA);
+	state->bpos = V3D_READ(V3D_BPOS);
+
+	state->vpmbase = V3D_READ(V3D_VPMBASE);
+
+	state->dbge = V3D_READ(V3D_DBGE);
+	state->fdbgo = V3D_READ(V3D_FDBGO);
+	state->fdbgb = V3D_READ(V3D_FDBGB);
+	state->fdbgr = V3D_READ(V3D_FDBGR);
+	state->fdbgs = V3D_READ(V3D_FDBGS);
+	state->errstat = V3D_READ(V3D_ERRSTAT);
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	if (vc4->hang_state) {
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		vc4_free_hang_state(dev, kernel_state);
+	} else {
+		vc4->hang_state = kernel_state;
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+	}
+}
+
+static void
+vc4_reset(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	DRM_INFO("Resetting GPU.\n");
+	vc4_v3d_set_power(vc4, false);
+	vc4_v3d_set_power(vc4, true);
+
+	vc4_irq_reset(dev);
+
+	/* Rearm the hangcheck -- another job might have been waiting
+	 * for our hung one to get kicked off, and vc4_irq_reset()
+	 * would have started it.
+	 */
+	vc4_queue_hangcheck(dev);
+}
+
+static void
+vc4_reset_work(struct work_struct *work)
+{
+	struct vc4_dev *vc4 =
+		container_of(work, struct vc4_dev, hangcheck.reset_work);
+
+	vc4_save_hang_state(vc4->dev);
+
+	vc4_reset(vc4->dev);
+}
+
+static void
+vc4_hangcheck_elapsed(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *)data;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	uint32_t ct0ca, ct1ca;
+
+	/* If idle, we can stop watching for hangs. */
+	if (list_empty(&vc4->job_list))
+		return;
+
+	ct0ca = V3D_READ(V3D_CTNCA(0));
+	ct1ca = V3D_READ(V3D_CTNCA(1));
+
+	/* If we've made any progress in execution, rearm the timer
+	 * and wait.
+	 */
+	if (ct0ca != vc4->hangcheck.last_ct0ca ||
+	    ct1ca != vc4->hangcheck.last_ct1ca) {
+		vc4->hangcheck.last_ct0ca = ct0ca;
+		vc4->hangcheck.last_ct1ca = ct1ca;
+		vc4_queue_hangcheck(dev);
+		return;
+	}
+
+	/* We've gone too long with no progress, reset.  This has to
+	 * be done from a work struct, since resetting can sleep and
+	 * this timer hook isn't allowed to.
+	 */
+	schedule_work(&vc4->hangcheck.reset_work);
+}
+
+static void
+submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Set the current and end address of the control list.
+	 * Writing the end register is what starts the job.
+	 */
+	V3D_WRITE(V3D_CTNCA(thread), start);
+	V3D_WRITE(V3D_CTNEA(thread), end);
+}
+
+int
+vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
+		   bool interruptible)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	int ret = 0;
+	unsigned long timeout_expire;
+	DEFINE_WAIT(wait);
+
+	if (vc4->finished_seqno >= seqno)
+		return 0;
+
+	if (timeout_ns == 0)
+		return -ETIME;
+
+	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
+
+	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
+	for (;;) {
+		prepare_to_wait(&vc4->job_wait_queue, &wait,
+				interruptible ? TASK_INTERRUPTIBLE :
+				TASK_UNINTERRUPTIBLE);
+
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (vc4->finished_seqno >= seqno)
+			break;
+
+		if (timeout_ns != ~0ull) {
+			if (time_after_eq(jiffies, timeout_expire)) {
+				ret = -ETIME;
+				break;
+			}
+			schedule_timeout(timeout_expire - jiffies);
+		} else {
+			schedule();
+		}
+	}
+
+	finish_wait(&vc4->job_wait_queue, &wait);
+	trace_vc4_wait_for_seqno_end(dev, seqno);
+
+	if (ret && ret != -ERESTARTSYS) {
+		DRM_ERROR("timeout waiting for render thread idle\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+vc4_flush_caches(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Flush the GPU L2 caches.  These caches sit on top of system
+	 * L3 (the 128kb or so shared with the CPU), and are
+	 * non-allocating in the L3.
+	 */
+	V3D_WRITE(V3D_L2CACTL,
+		  V3D_L2CACTL_L2CCLR);
+
+	V3D_WRITE(V3D_SLCACTL,
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
+}
+
+/* Sets the registers for the next job to actually be executed in
+ * the hardware.
+ *
+ * The job_lock should be held during this.
+ */
+void
+vc4_submit_next_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+	if (!exec)
+		return;
+
+	vc4_flush_caches(dev);
+
+	/* Disable the binner's pre-loaded overflow memory address */
+	V3D_WRITE(V3D_BPOA, 0);
+	V3D_WRITE(V3D_BPOS, 0);
+
+	if (exec->ct0ca != exec->ct0ea)
+		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
+}
+
+static void
+vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+{
+	struct vc4_bo *bo;
+	unsigned i;
+
+	for (i = 0; i < exec->bo_count; i++) {
+		bo = to_vc4_bo(&exec->bo[i]->base);
+		bo->seqno = seqno;
+	}
+
+	list_for_each_entry(bo, &exec->unref_list, unref_head) {
+		bo->seqno = seqno;
+	}
+}
+
+/* Queues a struct vc4_exec_info for execution.  If no job is
+ * currently executing, then submits it.
+ *
+ * Unlike most GPUs, our hardware only handles one command list at a
+ * time.  To queue multiple jobs at once, we'd need to edit the
+ * previous command list to have a jump to the new one at the end, and
+ * then bump the end address.  That's a change for a later date,
+ * though.
+ */
+static void
+vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	uint64_t seqno;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+
+	seqno = ++vc4->emit_seqno;
+	exec->seqno = seqno;
+	vc4_update_bo_seqnos(exec, seqno);
+
+	list_add_tail(&exec->head, &vc4->job_list);
+
+	/* If no job was executing, kick ours off.  Otherwise, it'll
+	 * get started when the previous job's frame done interrupt
+	 * occurs.
+	 */
+	if (vc4_first_job(vc4) == exec) {
+		vc4_submit_next_job(dev);
+		vc4_queue_hangcheck(dev);
+	}
+
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/**
+ * Looks up a bunch of GEM handles for BOs and stores the array for
+ * use in the command validator that actually writes relocated
+ * addresses pointing to them.
+ */
+static int
+vc4_cl_lookup_bos(struct drm_device *dev,
+		  struct drm_file *file_priv,
+		  struct vc4_exec_info *exec)
+{
+	struct drm_vc4_submit_cl *args = exec->args;
+	uint32_t *handles;
+	int ret = 0;
+	int i;
+
+	exec->bo_count = args->bo_handle_count;
+
+	if (!exec->bo_count) {
+		/* See comment on bo_index for why we have to check
+		 * this.
+		 */
+		DRM_ERROR("Rendering requires BOs to validate\n");
+		return -EINVAL;
+	}
+
+	exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
+			   GFP_KERNEL);
+	if (!exec->bo) {
+		DRM_ERROR("Failed to allocate validated BO pointers\n");
+		return -ENOMEM;
+	}
+
+	handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+	if (!handles) {
+		DRM_ERROR("Failed to allocate incoming GEM handles\n");
+		goto fail;
+	}
+
+	ret = copy_from_user(handles,
+			     (void __user *)(uintptr_t)args->bo_handles,
+			     exec->bo_count * sizeof(uint32_t));
+	if (ret) {
+		DRM_ERROR("Failed to copy in GEM handles\n");
+		goto fail;
+	}
+
+	spin_lock(&file_priv->table_lock);
+	for (i = 0; i < exec->bo_count; i++) {
+		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+						     handles[i]);
+		if (!bo) {
+			DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+				  i, handles[i]);
+			ret = -EINVAL;
+			spin_unlock(&file_priv->table_lock);
+			goto fail;
+		}
+		drm_gem_object_reference(bo);
+		exec->bo[i] = (struct drm_gem_cma_object *)bo;
+	}
+	spin_unlock(&file_priv->table_lock);
+
+fail:
+	kfree(handles);
+	return 0;
+}
+
+static int
+vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	struct drm_vc4_submit_cl *args = exec->args;
+	void *temp = NULL;
+	void *bin;
+	int ret = 0;
+	uint32_t bin_offset = 0;
+	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
+					     16);
+	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
+	uint32_t exec_size = uniforms_offset + args->uniforms_size;
+	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
+					  args->shader_rec_count);
+	struct vc4_bo *bo;
+
+	if (uniforms_offset < shader_rec_offset ||
+	    exec_size < uniforms_offset ||
+	    args->shader_rec_count >= (UINT_MAX /
+				       sizeof(struct vc4_shader_state)) ||
+	    temp_size < exec_size) {
+		DRM_ERROR("overflow in exec arguments\n");
+		goto fail;
+	}
+
+	/* Allocate space where we'll store the copied in user command lists
+	 * and shader records.
+	 *
+	 * We don't just copy directly into the BOs because we need to
+	 * read the contents back for validation, and I think the
+	 * bo->vaddr is uncached access.
+	 */
+	temp = kmalloc(temp_size, GFP_KERNEL);
+	if (!temp) {
+		DRM_ERROR("Failed to allocate storage for copying "
+			  "in bin/render CLs.\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	bin = temp + bin_offset;
+	exec->shader_rec_u = temp + shader_rec_offset;
+	exec->uniforms_u = temp + uniforms_offset;
+	exec->shader_state = temp + exec_size;
+	exec->shader_state_size = args->shader_rec_count;
+
+	ret = copy_from_user(bin,
+			     (void __user *)(uintptr_t)args->bin_cl,
+			     args->bin_cl_size);
+	if (ret) {
+		DRM_ERROR("Failed to copy in bin cl\n");
+		goto fail;
+	}
+
+	ret = copy_from_user(exec->shader_rec_u,
+			     (void __user *)(uintptr_t)args->shader_rec,
+			     args->shader_rec_size);
+	if (ret) {
+		DRM_ERROR("Failed to copy in shader recs\n");
+		goto fail;
+	}
+
+	ret = copy_from_user(exec->uniforms_u,
+			     (void __user *)(uintptr_t)args->uniforms,
+			     args->uniforms_size);
+	if (ret) {
+		DRM_ERROR("Failed to copy in uniforms cl\n");
+		goto fail;
+	}
+
+	bo = vc4_bo_create(dev, exec_size, true);
+	if (!bo) {
+		DRM_ERROR("Couldn't allocate BO for binning\n");
+		ret = PTR_ERR(exec->exec_bo);
+		goto fail;
+	}
+	exec->exec_bo = &bo->base;
+
+	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
+		      &exec->unref_list);
+
+	exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+
+	exec->bin_u = bin;
+
+	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
+	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+	exec->shader_rec_size = args->shader_rec_size;
+
+	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
+	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+	exec->uniforms_size = args->uniforms_size;
+
+	ret = vc4_validate_bin_cl(dev,
+				  exec->exec_bo->vaddr + bin_offset,
+				  bin,
+				  exec);
+	if (ret)
+		goto fail;
+
+	ret = vc4_validate_shader_recs(dev, exec);
+
+fail:
+	kfree(temp);
+	return ret;
+}
+
+static void
+vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	unsigned i;
+
+	/* Need the struct lock for drm_gem_object_unreference(). */
+	mutex_lock(&dev->struct_mutex);
+	if (exec->bo) {
+		for (i = 0; i < exec->bo_count; i++)
+			drm_gem_object_unreference(&exec->bo[i]->base);
+		kfree(exec->bo);
+	}
+
+	while (!list_empty(&exec->unref_list)) {
+		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
+						     struct vc4_bo, unref_head);
+		list_del(&bo->unref_head);
+		drm_gem_object_unreference(&bo->base.base);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(exec);
+}
+
+void
+vc4_job_handle_completed(struct vc4_dev *vc4)
+{
+	unsigned long irqflags;
+	struct vc4_seqno_cb *cb, *cb_temp;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	while (!list_empty(&vc4->job_done_list)) {
+		struct vc4_exec_info *exec =
+			list_first_entry(&vc4->job_done_list,
+					 struct vc4_exec_info, head);
+		list_del(&exec->head);
+
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		vc4_complete_exec(vc4->dev, exec);
+		spin_lock_irqsave(&vc4->job_lock, irqflags);
+	}
+
+	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
+		if (cb->seqno <= vc4->finished_seqno) {
+			list_del_init(&cb->work.entry);
+			schedule_work(&cb->work);
+		}
+	}
+
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+static void vc4_seqno_cb_work(struct work_struct *work)
+{
+	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
+
+	cb->func(cb);
+}
+
+int vc4_queue_seqno_cb(struct drm_device *dev,
+		       struct vc4_seqno_cb *cb, uint64_t seqno,
+		       void (*func)(struct vc4_seqno_cb *cb))
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	int ret = 0;
+	unsigned long irqflags;
+
+	cb->func = func;
+	INIT_WORK(&cb->work, vc4_seqno_cb_work);
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	if (seqno > vc4->finished_seqno) {
+		cb->seqno = seqno;
+		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
+	} else {
+		schedule_work(&cb->work);
+	}
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+	return ret;
+}
+
+/* Scheduled when any job has been completed, this walks the list of
+ * jobs that had completed and unrefs their BOs and frees their exec
+ * structs.
+ */
+static void
+vc4_job_done_work(struct work_struct *work)
+{
+	struct vc4_dev *vc4 =
+		container_of(work, struct vc4_dev, job_done_work);
+
+	vc4_job_handle_completed(vc4);
+}
+
+static int
+vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
+				uint64_t seqno,
+				uint64_t *timeout_ns)
+{
+	unsigned long start = jiffies;
+	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
+
+	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
+		uint64_t delta = jiffies_to_nsecs(jiffies - start);
+
+		if (*timeout_ns >= delta)
+			*timeout_ns -= delta;
+	}
+
+	return ret;
+}
+
+int
+vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_vc4_wait_seqno *args = data;
+
+	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
+					       &args->timeout_ns);
+}
+
+int
+vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	int ret;
+	struct drm_vc4_wait_bo *args = data;
+	struct drm_gem_object *gem_obj;
+	struct vc4_bo *bo;
+
+	gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+	bo = to_vc4_bo(gem_obj);
+
+	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
+					      &args->timeout_ns);
+
+	drm_gem_object_unreference_unlocked(gem_obj);
+	return ret;
+}
+
+/**
+ * Submits a command list to the VC4.
+ *
+ * This is what is called batchbuffer emitting on other hardware.
+ */
+int
+vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct drm_vc4_submit_cl *args = data;
+	struct vc4_exec_info *exec;
+	int ret;
+
+	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
+		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
+		return -EINVAL;
+	}
+
+	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
+	if (!exec) {
+		DRM_ERROR("malloc failure on exec struct\n");
+		return -ENOMEM;
+	}
+
+	exec->args = args;
+	INIT_LIST_HEAD(&exec->unref_list);
+
+	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
+	if (ret)
+		goto fail;
+
+	if (exec->args->bin_cl_size != 0) {
+		ret = vc4_get_bcl(dev, exec);
+		if (ret)
+			goto fail;
+	} else {
+		exec->ct0ca = 0;
+		exec->ct0ea = 0;
+	}
+
+	ret = vc4_get_rcl(dev, exec);
+	if (ret)
+		goto fail;
+
+	/* Clear this out of the struct we'll be putting in the queue,
+	 * since it's part of our stack.
+	 */
+	exec->args = NULL;
+
+	vc4_queue_submit(dev, exec);
+
+	/* Return the seqno for our job. */
+	args->seqno = vc4->emit_seqno;
+
+	return 0;
+
+fail:
+	vc4_complete_exec(vc4->dev, exec);
+
+	return ret;
+}
+
+void
+vc4_gem_init(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	INIT_LIST_HEAD(&vc4->job_list);
+	INIT_LIST_HEAD(&vc4->job_done_list);
+	INIT_LIST_HEAD(&vc4->seqno_cb_list);
+	spin_lock_init(&vc4->job_lock);
+
+	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
+	setup_timer(&vc4->hangcheck.timer,
+		    vc4_hangcheck_elapsed,
+		    (unsigned long)dev);
+
+	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
+}
+
+void
+vc4_gem_destroy(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Waiting for exec to finish would need to be done before
+	 * unregistering V3D.
+	 */
+	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
+
+	/* V3D should already have disabled its interrupt and cleared
+	 * the overflow allocation registers.  Now free the object.
+	 */
+	if (vc4->overflow_mem) {
+		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+		vc4->overflow_mem = NULL;
+	}
+
+	vc4_bo_cache_destroy(dev);
+
+	if (vc4->hang_state)
+		vc4_free_hang_state(dev, vc4->hang_state);
+}
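
One subtlety in the wait ioctls above: on -ERESTARTSYS the helper shaves the elapsed time off the user-visible timeout_ns, so a restarted syscall retries with the remaining budget instead of the original one. The arithmetic in isolation, as a self-contained model (plain C, illustrative only; the kernel helper leaves the value unchanged rather than clamping when the elapsed time exceeds it):

#include <stdint.h>
#include <stdio.h>

/* Mirrors vc4_wait_for_seqno_ioctl_helper()'s timeout adjustment:
 * ~0ull means "wait forever" and is never decremented; otherwise the
 * elapsed time is subtracted, clamping at zero here for simplicity.
 */
static uint64_t remaining_timeout_ns(uint64_t timeout_ns, uint64_t elapsed_ns)
{
	if (timeout_ns == ~0ull)
		return timeout_ns;
	return timeout_ns >= elapsed_ns ? timeout_ns - elapsed_ns : 0;
}

int main(void)
{
	/* A 5ms wait interrupted after 2ms should retry with 3ms. */
	printf("%llu\n",
	       (unsigned long long)remaining_timeout_ns(5000000, 2000000));
	return 0;
}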
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index da9a36d6e1d1..c69c0460196b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -519,7 +519,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 	WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0);
 
 	drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
+			 DRM_MODE_ENCODER_TMDS, NULL);
 	drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
 
 	hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
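
The extra NULL here tracks a concurrent DRM core change: drm_encoder_init() gained a printf-style name argument so drivers can label encoders in debug output, and passing NULL keeps the core's autogenerated name. For reference, the signature this call site now expects is along the lines of:

int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder,
		     const struct drm_encoder_funcs *funcs,
		     int encoder_type, const char *name, ...);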
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ab1673f672a4..8098c5b21ba4 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev)
 	for (i = 0; i < 64; i += 4) {
 		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
 			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
-			 ((uint32_t *)vc4->hvs->dlist)[i + 0],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 1],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 2],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
 	}
 }
 
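
This hunk is a correctness fix as much as cleanup: hvs->dlist points at MMIO, and an __iomem pointer must be read through readl() rather than indexed as an ordinary uint32_t array (direct dereference is not guaranteed to work on all architectures, and sparse flags it). The same pattern reduced to a minimal kernel-style sketch (illustrative):

/* Dump a register window one word per line; every access goes through
 * readl(), which performs a proper, byte-order-aware MMIO load.
 */
static void dump_io_words(u32 __iomem *base, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		pr_info("0x%08x: 0x%08x\n", i * 4, readl(base + i));
}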
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
new file mode 100644
index 000000000000..b68060e758db
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/** DOC: Interrupt management for the V3D engine.
+ *
+ * We have an interrupt status register (V3D_INTCTL) which reports
+ * interrupts, and where writing 1 bits clears those interrupts.
+ * There are also a pair of interrupt registers
+ * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
+ * disables that specific interrupt, and 0s written are ignored
+ * (reading either one returns the set of enabled interrupts).
+ *
+ * When we take a render frame interrupt, we need to wake the
+ * processes waiting for some frame to be done, and get the next frame
+ * submitted ASAP (so the hardware doesn't sit idle when there's work
+ * to do).
+ *
+ * When we take the binner out of memory interrupt, we need to
+ * allocate some new memory and pass it to the binner so that the
+ * current job can make progress.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+			 V3D_INT_FRDONE)
+
+DECLARE_WAIT_QUEUE_HEAD(render_wait);
+
+static void
+vc4_overflow_mem_work(struct work_struct *work)
+{
+	struct vc4_dev *vc4 =
+		container_of(work, struct vc4_dev, overflow_mem_work);
+	struct drm_device *dev = vc4->dev;
+	struct vc4_bo *bo;
+
+	bo = vc4_bo_create(dev, 256 * 1024, true);
+	if (!bo) {
+		DRM_ERROR("Couldn't allocate binner overflow mem\n");
+		return;
+	}
+
+	/* If there's a job executing currently, then our previous
+	 * overflow allocation is getting used in that job and we need
+	 * to queue it to be released when the job is done.  But if no
+	 * job is executing at all, then we can free the old overflow
+	 * object directly.
+	 *
+	 * No lock necessary for this pointer since we're the only
+	 * ones that update the pointer, and our workqueue won't
+	 * reenter.
+	 */
+	if (vc4->overflow_mem) {
+		struct vc4_exec_info *current_exec;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&vc4->job_lock, irqflags);
+		current_exec = vc4_first_job(vc4);
+		if (current_exec) {
+			vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+			list_add_tail(&vc4->overflow_mem->unref_head,
+				      &current_exec->unref_list);
+			vc4->overflow_mem = NULL;
+		}
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+	}
+
+	if (vc4->overflow_mem)
+		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+	vc4->overflow_mem = bo;
+
+	V3D_WRITE(V3D_BPOA, bo->base.paddr);
+	V3D_WRITE(V3D_BPOS, bo->base.base.size);
+	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
+	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+}
+
+static void
+vc4_irq_finish_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+	if (!exec)
+		return;
+
+	vc4->finished_seqno++;
+	list_move_tail(&exec->head, &vc4->job_done_list);
+	vc4_submit_next_job(dev);
+
+	wake_up_all(&vc4->job_wait_queue);
+	schedule_work(&vc4->job_done_work);
+}
+
+irqreturn_t
+vc4_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	uint32_t intctl;
+	irqreturn_t status = IRQ_NONE;
+
+	barrier();
+	intctl = V3D_READ(V3D_INTCTL);
+
+	/* Acknowledge the interrupts we're handling here.  The render
+	 * frame done interrupt will be cleared, while OUTOMEM will
+	 * stay high until the underlying cause is cleared.
+	 */
+	V3D_WRITE(V3D_INTCTL, intctl);
+
+	if (intctl & V3D_INT_OUTOMEM) {
+		/* Disable OUTOMEM until the work is done. */
+		V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
+		schedule_work(&vc4->overflow_mem_work);
+		status = IRQ_HANDLED;
+	}
+
+	if (intctl & V3D_INT_FRDONE) {
+		spin_lock(&vc4->job_lock);
+		vc4_irq_finish_job(dev);
+		spin_unlock(&vc4->job_lock);
+		status = IRQ_HANDLED;
+	}
+
+	return status;
+}
+
+void
+vc4_irq_preinstall(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	init_waitqueue_head(&vc4->job_wait_queue);
+	INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
+
+	/* Clear any pending interrupts someone might have left around
+	 * for us.
+	 */
+	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+}
+
+int
+vc4_irq_postinstall(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Enable both the render done and out of memory interrupts. */
+	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+	return 0;
+}
+
+void
+vc4_irq_uninstall(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Disable sending interrupts for our driver's IRQs. */
+	V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
+
+	/* Clear any pending interrupts we might have left. */
+	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+	cancel_work_sync(&vc4->overflow_mem_work);
+}
+
+/** Reinitializes interrupt registers when a GPU reset is performed. */
+void vc4_irq_reset(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	unsigned long irqflags;
+
+	/* Acknowledge any stale IRQs. */
+	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+	/*
+	 * Turn all our interrupts on.  Binner out of memory is the
+	 * only one we expect to trigger at this point, since we've
+	 * just come from poweron and haven't supplied any overflow
+	 * memory yet.
+	 */
+	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	vc4_irq_finish_job(dev);
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
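
The register discipline from the DOC comment -- write-1-to-clear status in V3D_INTCTL, separate write-1 enable/disable registers -- is a common MMIO idiom, and the handler above follows it exactly: ack everything read, then mask OUTOMEM until the deferred work can supply memory. Reduced to a generic sketch (the offsets and names are stand-ins, not real V3D registers):

/* Generic write-1-to-clear handshake, mirroring vc4_irq() above. */
#define REG_STATUS	0x00	/* write 1s to acknowledge */
#define REG_ENABLE	0x04	/* write 1s to enable, 0s ignored */
#define REG_DISABLE	0x08	/* write 1s to disable, 0s ignored */

static irqreturn_t demo_irq_handler(void __iomem *regs, u32 wanted,
				    u32 deferred)
{
	u32 status = readl(regs + REG_STATUS) & wanted;

	if (!status)
		return IRQ_NONE;

	/* Ack first, so a new event arriving while we service this one
	 * re-asserts the line instead of being lost.
	 */
	writel(status, regs + REG_STATUS);

	/* Bits whose underlying condition needs sleeping work (like
	 * OUTOMEM) get masked here and re-enabled by that work.
	 */
	writel(status & deferred, regs + REG_DISABLE);

	return IRQ_HANDLED;
}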
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 2e5597d10cc6..f95f2df5f8d1 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -15,6 +15,7 @@
  */
 
 #include "drm_crtc.h"
+#include "drm_atomic.h"
 #include "drm_atomic_helper.h"
 #include "drm_crtc_helper.h"
 #include "drm_plane_helper.h"
@@ -29,10 +30,152 @@ static void vc4_output_poll_changed(struct drm_device *dev)
 	drm_fbdev_cma_hotplug_event(vc4->fbdev);
 }
 
+struct vc4_commit {
+	struct drm_device *dev;
+	struct drm_atomic_state *state;
+	struct vc4_seqno_cb cb;
+};
+
+static void
+vc4_atomic_complete_commit(struct vc4_commit *c)
+{
+	struct drm_atomic_state *state = c->state;
+	struct drm_device *dev = state->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_planes(dev, state, false);
+
+	drm_atomic_helper_commit_modeset_enables(dev, state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	drm_atomic_state_free(state);
+
+	up(&vc4->async_modeset);
+
+	kfree(c);
+}
+
+static void
+vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
+{
+	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
+
+	vc4_atomic_complete_commit(c);
+}
+
+static struct vc4_commit *commit_init(struct drm_atomic_state *state)
+{
+	struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+	if (!c)
+		return NULL;
+	c->dev = state->dev;
+	c->state = state;
+
+	return c;
+}
+
+/**
+ * vc4_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
+ * reservation fails.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int vc4_atomic_commit(struct drm_device *dev,
+			     struct drm_atomic_state *state,
+			     bool async)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	int ret;
+	int i;
+	uint64_t wait_seqno = 0;
+	struct vc4_commit *c;
+
+	c = commit_init(state);
+	if (!c)
+		return -ENOMEM;
+
+	/* Make sure that any outstanding modesets have finished. */
+	ret = down_interruptible(&vc4->async_modeset);
+	if (ret) {
+		kfree(c);
+		return ret;
+	}
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret) {
+		kfree(c);
+		up(&vc4->async_modeset);
+		return ret;
+	}
+
+	for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+		struct drm_plane *plane = state->planes[i];
+		struct drm_plane_state *new_state = state->plane_states[i];
+
+		if (!plane)
+			continue;
+
+		if ((plane->state->fb != new_state->fb) && new_state->fb) {
+			struct drm_gem_cma_object *cma_bo =
+				drm_fb_cma_get_gem_obj(new_state->fb, 0);
+			struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+			wait_seqno = max(bo->seqno, wait_seqno);
+		}
+	}
+
+	/*
+	 * This is the point of no return - everything below never fails except
+	 * when the hw goes bonghits. Which means we can commit the new state on
+	 * the software side now.
+	 */
+
+	drm_atomic_helper_swap_state(dev, state);
+
+	/*
+	 * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: It must be guaranteed
+	 * that the asynchronous work has either been cancelled (if the driver
+	 * supports it, which at least requires that the framebuffers get
+	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+	 * before the new state gets committed on the software side with
+	 * drm_atomic_helper_swap_state().
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update. Which is important since compositors need to figure out the
+	 * composition of the next frame right after having submitted the
+	 * current layout.
+	 */
+
+	if (async) {
+		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
+				   vc4_atomic_complete_commit_seqno_cb);
+	} else {
+		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
+		vc4_atomic_complete_commit(c);
+	}
+
+	return 0;
+}
+
 static const struct drm_mode_config_funcs vc4_mode_funcs = {
 	.output_poll_changed = vc4_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_commit = vc4_atomic_commit,
 	.fb_create = drm_fb_cma_create,
 };
 
@@ -41,6 +184,8 @@ int vc4_kms_load(struct drm_device *dev)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
 
+	sema_init(&vc4->async_modeset, 1);
+
 	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize vblank\n");
@@ -51,6 +196,8 @@ int vc4_kms_load(struct drm_device *dev)
 	dev->mode_config.max_height = 2048;
 	dev->mode_config.funcs = &vc4_mode_funcs;
 	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.async_page_flip = true;
+
 	dev->vblank_disable_allowed = true;
 
 	drm_mode_config_reset(dev);
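
What makes this commit path tick is the pairing of the async_modeset semaphore with the seqno callback: the commit takes the semaphore up front, and whichever path completes the commit -- synchronous wait or deferred callback -- releases it, so at most one commit touches the hardware state at a time while userspace can already prepare the next one. The ordering skeleton, stripped of the DRM plumbing (illustrative, not a drop-in replacement):

static int commit_skeleton(struct drm_device *dev,
			   struct drm_atomic_state *state,
			   bool async, uint64_t wait_seqno,
			   struct vc4_commit *c)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	ret = down_interruptible(&vc4->async_modeset);	/* one commit at a time */
	if (ret)
		return ret;

	drm_atomic_helper_swap_state(dev, state);	/* point of no return */

	if (async) {
		/* vc4_atomic_complete_commit() runs from the seqno
		 * callback once rendering finishes; its up() lets the
		 * next commit in.
		 */
		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
				   vc4_atomic_complete_commit_seqno_cb);
	} else {
		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
		vc4_atomic_complete_commit(c);	/* also does the up() */
	}
	return 0;
}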
diff --git a/drivers/gpu/drm/vc4/vc4_packet.h b/drivers/gpu/drm/vc4/vc4_packet.h
new file mode 100644
index 000000000000..0f31cc06500f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_packet.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_PACKET_H
+#define VC4_PACKET_H
+
+#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
+
+enum vc4_packet {
+	VC4_PACKET_HALT = 0,
+	VC4_PACKET_NOP = 1,
+
+	VC4_PACKET_FLUSH = 4,
+	VC4_PACKET_FLUSH_ALL = 5,
+	VC4_PACKET_START_TILE_BINNING = 6,
+	VC4_PACKET_INCREMENT_SEMAPHORE = 7,
+	VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
+
+	VC4_PACKET_BRANCH = 16,
+	VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
+
+	VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
+	VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
+	VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
+	VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
+	VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
+	VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
+
+	VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
+	VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
+
+	VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
+	VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
+
+	VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
+
+	VC4_PACKET_GL_SHADER_STATE = 64,
+	VC4_PACKET_NV_SHADER_STATE = 65,
+	VC4_PACKET_VG_SHADER_STATE = 66,
+
+	VC4_PACKET_CONFIGURATION_BITS = 96,
+	VC4_PACKET_FLAT_SHADE_FLAGS = 97,
+	VC4_PACKET_POINT_SIZE = 98,
+	VC4_PACKET_LINE_WIDTH = 99,
+	VC4_PACKET_RHT_X_BOUNDARY = 100,
+	VC4_PACKET_DEPTH_OFFSET = 101,
+	VC4_PACKET_CLIP_WINDOW = 102,
+	VC4_PACKET_VIEWPORT_OFFSET = 103,
+	VC4_PACKET_Z_CLIPPING = 104,
+	VC4_PACKET_CLIPPER_XY_SCALING = 105,
+	VC4_PACKET_CLIPPER_Z_SCALING = 106,
+
+	VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
+	VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
+	VC4_PACKET_CLEAR_COLORS = 114,
+	VC4_PACKET_TILE_COORDINATES = 115,
+
+	/* Not an actual hardware packet -- this is what we use to put
 79 * references to GEM BOs in the command stream, since we need the u32
 80 * in the actual address packet in order to store the offset from the
81 * start of the BO.
82 */
83 VC4_PACKET_GEM_HANDLES = 254,
84} __attribute__ ((__packed__));
85
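Because VC4_PACKET_GEM_HANDLES is software-only, its payload is simply the packet byte followed by two u32 indices into the submit's BO handle table (9 bytes total, matching VC4_PACKET_GEM_HANDLES_SIZE below). A hypothetical sketch of reading those indices back out of a command stream; the helper name is illustrative, not the driver's actual relocation code:

	static void vc4_read_gem_handles(const u8 *pkt, u32 *hindex0, u32 *hindex1)
	{
		/* pkt[0] is VC4_PACKET_GEM_HANDLES; the two u32s after the
		 * packet byte index into the submitted BO handle table.
		 */
		memcpy(hindex0, pkt + 1, sizeof(u32));
		memcpy(hindex1, pkt + 5, sizeof(u32));
	}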
86#define VC4_PACKET_HALT_SIZE 1
87#define VC4_PACKET_NOP_SIZE 1
88#define VC4_PACKET_FLUSH_SIZE 1
89#define VC4_PACKET_FLUSH_ALL_SIZE 1
90#define VC4_PACKET_START_TILE_BINNING_SIZE 1
91#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
92#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
93#define VC4_PACKET_BRANCH_SIZE 5
94#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
95#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
96#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
97#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
98#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
99#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
100#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
101#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
102#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
103#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
104#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
105#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
106#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
107#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
108#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
109#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
110#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
111#define VC4_PACKET_POINT_SIZE_SIZE 5
112#define VC4_PACKET_LINE_WIDTH_SIZE 5
113#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
114#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
115#define VC4_PACKET_CLIP_WINDOW_SIZE 9
116#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
117#define VC4_PACKET_Z_CLIPPING_SIZE 9
118#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
119#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
120#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
121#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
122#define VC4_PACKET_CLEAR_COLORS_SIZE 14
123#define VC4_PACKET_TILE_COORDINATES_SIZE 3
124#define VC4_PACKET_GEM_HANDLES_SIZE 9
125
126/* Number of multisamples supported. */
127#define VC4_MAX_SAMPLES 4
128/* Size of a full resolution color or Z tile buffer load/store. */
129#define VC4_TILE_BUFFER_SIZE (64 * 64 * 4)
130
131/** @{
132 * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
133 * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
134 */
135#define VC4_TILING_FORMAT_LINEAR 0
136#define VC4_TILING_FORMAT_T 1
137#define VC4_TILING_FORMAT_LT 2
138/** @} */
139
140/** @{
141 *
142 * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
143 * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
144 */
145#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
146#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
147#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
148#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
149
160/** @{
161 *
162 * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
163 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
164 */
165
166#define VC4_LOADSTORE_TILE_BUFFER_EOF BIT(3)
167#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK BIT(2)
168#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS BIT(1)
169#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR BIT(0)
170
171/** @} */
172
173/** @{
174 *
175 * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
176 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
177 */
178#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR BIT(15)
179#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR BIT(14)
180#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR BIT(13)
181#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP BIT(12)
182
183#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
184#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
185#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
186#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
187#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
188/** @} */
189
190/** @{
191 *
192 * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
193 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
194 */
195#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
196#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
197#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
198#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
199#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
200
201/** The values of the field are VC4_TILING_FORMAT_* */
202#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
203#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
204
205#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
206#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
207#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
208#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
209#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
210#define VC4_LOADSTORE_TILE_BUFFER_Z 3
211#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
212#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
213/** @} */
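These MASK/SHIFT pairs are meant to be used through the VC4_SET_FIELD()/VC4_GET_FIELD() helpers included from vc4_regs.h above. As a sketch mirroring the usage in vc4_render_cl.c later in this patch, the u16 of a LOAD/STORE_TILE_BUFFER_GENERAL packet for a linear RGBA8888 color buffer would be packed as:

	u16 bits = VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_COLOR,
				 VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
		   VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_RGBA8888,
				 VC4_LOADSTORE_TILE_BUFFER_FORMAT) |
		   VC4_SET_FIELD(VC4_TILING_FORMAT_LINEAR,
				 VC4_LOADSTORE_TILE_BUFFER_TILING);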
214
215#define VC4_INDEX_BUFFER_U8 (0 << 4)
216#define VC4_INDEX_BUFFER_U16 (1 << 4)
217
218/* This flag is only present in NV shader state. */
219#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS BIT(3)
220#define VC4_SHADER_FLAG_ENABLE_CLIPPING BIT(2)
221#define VC4_SHADER_FLAG_VS_POINT_SIZE BIT(1)
222#define VC4_SHADER_FLAG_FS_SINGLE_THREAD BIT(0)
223
224/** @{ byte 2 of config bits. */
225#define VC4_CONFIG_BITS_EARLY_Z_UPDATE BIT(1)
226#define VC4_CONFIG_BITS_EARLY_Z BIT(0)
227/** @} */
228
229/** @{ byte 1 of config bits. */
230#define VC4_CONFIG_BITS_Z_UPDATE BIT(7)
231/** same values in this 3-bit field as PIPE_FUNC_* */
232#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
233#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE BIT(3)
234
235#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
236#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
237#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
238#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
239
240#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT BIT(0)
241/** @} */
242
243/** @{ byte 0 of config bits. */
244#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
245#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
246#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
247
248#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES BIT(4)
249#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET BIT(3)
250#define VC4_CONFIG_BITS_CW_PRIMITIVES BIT(2)
251#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK BIT(1)
252#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT BIT(0)
253/** @} */
254
255/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
256#define VC4_BIN_CONFIG_DB_NON_MS BIT(7)
257
258#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
259#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
260#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
261#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
262#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
263#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
264
265#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
266#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
267#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
268#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
269#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
270#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
271
272#define VC4_BIN_CONFIG_AUTO_INIT_TSDA BIT(2)
273#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT BIT(1)
274#define VC4_BIN_CONFIG_MS_MODE_4X BIT(0)
275/** @} */
276
277/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
278#define VC4_RENDER_CONFIG_DB_NON_MS BIT(12)
279#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE BIT(11)
280#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G BIT(10)
281#define VC4_RENDER_CONFIG_COVERAGE_MODE BIT(9)
282#define VC4_RENDER_CONFIG_ENABLE_VG_MASK BIT(8)
283
284/** The values of the field are VC4_TILING_FORMAT_* */
285#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
286#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
287
288#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
289#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
290#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
291
292#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
293#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
294#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
295#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
296#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
297
298#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT BIT(1)
299#define VC4_RENDER_CONFIG_MS_MODE_4X BIT(0)
300
301#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
302#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
303#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
304#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
305#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
306#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
307
308enum vc4_texture_data_type {
309 VC4_TEXTURE_TYPE_RGBA8888 = 0,
310 VC4_TEXTURE_TYPE_RGBX8888 = 1,
311 VC4_TEXTURE_TYPE_RGBA4444 = 2,
312 VC4_TEXTURE_TYPE_RGBA5551 = 3,
313 VC4_TEXTURE_TYPE_RGB565 = 4,
314 VC4_TEXTURE_TYPE_LUMINANCE = 5,
315 VC4_TEXTURE_TYPE_ALPHA = 6,
316 VC4_TEXTURE_TYPE_LUMALPHA = 7,
317 VC4_TEXTURE_TYPE_ETC1 = 8,
318 VC4_TEXTURE_TYPE_S16F = 9,
319 VC4_TEXTURE_TYPE_S8 = 10,
320 VC4_TEXTURE_TYPE_S16 = 11,
321 VC4_TEXTURE_TYPE_BW1 = 12,
322 VC4_TEXTURE_TYPE_A4 = 13,
323 VC4_TEXTURE_TYPE_A1 = 14,
324 VC4_TEXTURE_TYPE_RGBA64 = 15,
325 VC4_TEXTURE_TYPE_RGBA32R = 16,
326 VC4_TEXTURE_TYPE_YUV422R = 17,
327};
328
329#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
330#define VC4_TEX_P0_OFFSET_SHIFT 12
331#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
332#define VC4_TEX_P0_CSWIZ_SHIFT 10
333#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
334#define VC4_TEX_P0_CMMODE_SHIFT 9
335#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
336#define VC4_TEX_P0_FLIPY_SHIFT 8
337#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
338#define VC4_TEX_P0_TYPE_SHIFT 4
339#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
340#define VC4_TEX_P0_MIPLVLS_SHIFT 0
341
342#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
343#define VC4_TEX_P1_TYPE4_SHIFT 31
344#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
345#define VC4_TEX_P1_HEIGHT_SHIFT 20
346#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
347#define VC4_TEX_P1_ETCFLIP_SHIFT 19
348#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
349#define VC4_TEX_P1_WIDTH_SHIFT 8
350
351#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
352#define VC4_TEX_P1_MAGFILT_SHIFT 7
353# define VC4_TEX_P1_MAGFILT_LINEAR 0
354# define VC4_TEX_P1_MAGFILT_NEAREST 1
355
356#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
357#define VC4_TEX_P1_MINFILT_SHIFT 4
358# define VC4_TEX_P1_MINFILT_LINEAR 0
359# define VC4_TEX_P1_MINFILT_NEAREST 1
360# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
361# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
362# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
363# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
364
365#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
366#define VC4_TEX_P1_WRAP_T_SHIFT 2
367#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
368#define VC4_TEX_P1_WRAP_S_SHIFT 0
369# define VC4_TEX_P1_WRAP_REPEAT 0
370# define VC4_TEX_P1_WRAP_CLAMP 1
371# define VC4_TEX_P1_WRAP_MIRROR 2
372# define VC4_TEX_P1_WRAP_BORDER 3
373
374#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
375#define VC4_TEX_P2_PTYPE_SHIFT 30
376# define VC4_TEX_P2_PTYPE_IGNORED 0
377# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
378# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
379# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
380
381/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
382#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
383#define VC4_TEX_P2_CMST_SHIFT 12
384#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
385#define VC4_TEX_P2_BSLOD_SHIFT 0
386
387/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
388#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
389#define VC4_TEX_P2_CHEIGHT_SHIFT 12
390#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
391#define VC4_TEX_P2_CWIDTH_SHIFT 0
392
393/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
394#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
395#define VC4_TEX_P2_CYOFF_SHIFT 12
396#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
397#define VC4_TEX_P2_CXOFF_SHIFT 0
398
399#endif /* VC4_PACKET_H */
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cdd8b10c0147..0addbad15832 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -29,6 +29,14 @@ struct vc4_plane_state {
29 u32 *dlist; 29 u32 *dlist;
 30 u32 dlist_size; /* Number of dwords allocated for the display list. */ 30 u32 dlist_size; /* Number of dwords allocated for the display list. */
31 u32 dlist_count; /* Number of used dwords in the display list. */ 31 u32 dlist_count; /* Number of used dwords in the display list. */
32
33 /* Offset in the dlist to pointer word 0. */
34 u32 pw0_offset;
35
36 /* Offset where the plane's dlist was last stored in the
 37 * hardware at vc4_crtc_atomic_flush() time.
38 */
39 u32 *hw_dlist;
32}; 40};
33 41
34static inline struct vc4_plane_state * 42static inline struct vc4_plane_state *
@@ -70,7 +78,7 @@ static bool plane_enabled(struct drm_plane_state *state)
70 return state->fb && state->crtc; 78 return state->fb && state->crtc;
71} 79}
72 80
73struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) 81static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
74{ 82{
75 struct vc4_plane_state *vc4_state; 83 struct vc4_plane_state *vc4_state;
76 84
@@ -97,8 +105,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
97 return &vc4_state->base; 105 return &vc4_state->base;
98} 106}
99 107
100void vc4_plane_destroy_state(struct drm_plane *plane, 108static void vc4_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 109 struct drm_plane_state *state)
102{ 110{
103 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 111 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
104 112
@@ -108,7 +116,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane,
108} 116}
109 117
110/* Called during init to allocate the plane's atomic state. */ 118/* Called during init to allocate the plane's atomic state. */
111void vc4_plane_reset(struct drm_plane *plane) 119static void vc4_plane_reset(struct drm_plane *plane)
112{ 120{
113 struct vc4_plane_state *vc4_state; 121 struct vc4_plane_state *vc4_state;
114 122
@@ -157,6 +165,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
157 int crtc_w = state->crtc_w; 165 int crtc_w = state->crtc_w;
158 int crtc_h = state->crtc_h; 166 int crtc_h = state->crtc_h;
159 167
168 if (state->crtc_w << 16 != state->src_w ||
169 state->crtc_h << 16 != state->src_h) {
170 /* We don't support scaling yet, which involves
171 * allocating the LBM memory for scaling temporary
172 * storage, and putting filter kernels in the HVS
173 * context.
174 */
175 return -EINVAL;
176 }
177
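(The << 16 comparisons work because atomic plane source coordinates are in 16.16 fixed point: an unscaled 100-pixel-wide plane has src_w == 100 << 16, so any mismatch against crtc_w << 16 means the commit is requesting a scaled blit.)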
160 if (crtc_x < 0) { 178 if (crtc_x < 0) {
161 offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; 179 offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
162 crtc_w += crtc_x; 180 crtc_w += crtc_x;
@@ -197,6 +215,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
197 /* Position Word 3: Context. Written by the HVS. */ 215 /* Position Word 3: Context. Written by the HVS. */
198 vc4_dlist_write(vc4_state, 0xc0c0c0c0); 216 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
199 217
218 vc4_state->pw0_offset = vc4_state->dlist_count;
219
200 /* Pointer Word 0: RGB / Y Pointer */ 220 /* Pointer Word 0: RGB / Y Pointer */
201 vc4_dlist_write(vc4_state, bo->paddr + offset); 221 vc4_dlist_write(vc4_state, bo->paddr + offset);
202 222
@@ -248,6 +268,8 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
248 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state); 268 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
249 int i; 269 int i;
250 270
271 vc4_state->hw_dlist = dlist;
272
251 /* Can't memcpy_toio() because it needs to be 32-bit writes. */ 273 /* Can't memcpy_toio() because it needs to be 32-bit writes. */
252 for (i = 0; i < vc4_state->dlist_count; i++) 274 for (i = 0; i < vc4_state->dlist_count; i++)
253 writel(vc4_state->dlist[i], &dlist[i]); 275 writel(vc4_state->dlist[i], &dlist[i]);
@@ -262,6 +284,34 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state)
262 return vc4_state->dlist_count; 284 return vc4_state->dlist_count;
263} 285}
264 286
287/* Updates the plane to immediately (well, once the FIFO needs
 288 * refilling) scan out from a new framebuffer.
289 */
290void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
291{
292 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
293 struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
294 uint32_t addr;
295
296 /* We're skipping the address adjustment for negative origin,
297 * because this is only called on the primary plane.
298 */
299 WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
300 addr = bo->paddr + fb->offsets[0];
301
302 /* Write the new address into the hardware immediately. The
303 * scanout will start from this address as soon as the FIFO
304 * needs to refill with pixels.
305 */
306 writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
307
308 /* Also update the CPU-side dlist copy, so that any later
309 * atomic updates that don't do a new modeset on our plane
310 * also use our updated address.
311 */
312 vc4_state->dlist[vc4_state->pw0_offset] = addr;
313}
314
265static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { 315static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
266 .prepare_fb = NULL, 316 .prepare_fb = NULL,
267 .cleanup_fb = NULL, 317 .cleanup_fb = NULL,
@@ -307,7 +357,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
307 ret = drm_universal_plane_init(dev, plane, 0xff, 357 ret = drm_universal_plane_init(dev, plane, 0xff,
308 &vc4_plane_funcs, 358 &vc4_plane_funcs,
309 formats, ARRAY_SIZE(formats), 359 formats, ARRAY_SIZE(formats),
310 type); 360 type, NULL);
311 361
312 drm_plane_helper_add(plane, &vc4_plane_helper_funcs); 362 drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
313 363
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
new file mode 100644
index 000000000000..d5c2f3c85ebb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -0,0 +1,264 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef VC4_QPU_DEFINES_H
25#define VC4_QPU_DEFINES_H
26
27enum qpu_op_add {
28 QPU_A_NOP,
29 QPU_A_FADD,
30 QPU_A_FSUB,
31 QPU_A_FMIN,
32 QPU_A_FMAX,
33 QPU_A_FMINABS,
34 QPU_A_FMAXABS,
35 QPU_A_FTOI,
36 QPU_A_ITOF,
37 QPU_A_ADD = 12,
38 QPU_A_SUB,
39 QPU_A_SHR,
40 QPU_A_ASR,
41 QPU_A_ROR,
42 QPU_A_SHL,
43 QPU_A_MIN,
44 QPU_A_MAX,
45 QPU_A_AND,
46 QPU_A_OR,
47 QPU_A_XOR,
48 QPU_A_NOT,
49 QPU_A_CLZ,
50 QPU_A_V8ADDS = 30,
51 QPU_A_V8SUBS = 31,
52};
53
54enum qpu_op_mul {
55 QPU_M_NOP,
56 QPU_M_FMUL,
57 QPU_M_MUL24,
58 QPU_M_V8MULD,
59 QPU_M_V8MIN,
60 QPU_M_V8MAX,
61 QPU_M_V8ADDS,
62 QPU_M_V8SUBS,
63};
64
65enum qpu_raddr {
66 QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
67 /* 0-31 are the plain regfile a or b fields */
68 QPU_R_UNIF = 32,
69 QPU_R_VARY = 35,
70 QPU_R_ELEM_QPU = 38,
71 QPU_R_NOP,
72 QPU_R_XY_PIXEL_COORD = 41,
73 QPU_R_MS_REV_FLAGS = 41,
74 QPU_R_VPM = 48,
75 QPU_R_VPM_LD_BUSY,
76 QPU_R_VPM_LD_WAIT,
77 QPU_R_MUTEX_ACQUIRE,
78};
79
80enum qpu_waddr {
81 /* 0-31 are the plain regfile a or b fields */
82 QPU_W_ACC0 = 32, /* aka r0 */
83 QPU_W_ACC1,
84 QPU_W_ACC2,
85 QPU_W_ACC3,
86 QPU_W_TMU_NOSWAP,
87 QPU_W_ACC5,
88 QPU_W_HOST_INT,
89 QPU_W_NOP,
90 QPU_W_UNIFORMS_ADDRESS,
91 QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
92 QPU_W_MS_FLAGS = 42,
93 QPU_W_REV_FLAG = 42,
94 QPU_W_TLB_STENCIL_SETUP = 43,
95 QPU_W_TLB_Z,
96 QPU_W_TLB_COLOR_MS,
97 QPU_W_TLB_COLOR_ALL,
98 QPU_W_TLB_ALPHA_MASK,
99 QPU_W_VPM,
100 QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
101 QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
102 QPU_W_MUTEX_RELEASE,
103 QPU_W_SFU_RECIP,
104 QPU_W_SFU_RECIPSQRT,
105 QPU_W_SFU_EXP,
106 QPU_W_SFU_LOG,
107 QPU_W_TMU0_S,
108 QPU_W_TMU0_T,
109 QPU_W_TMU0_R,
110 QPU_W_TMU0_B,
111 QPU_W_TMU1_S,
112 QPU_W_TMU1_T,
113 QPU_W_TMU1_R,
114 QPU_W_TMU1_B,
115};
116
117enum qpu_sig_bits {
118 QPU_SIG_SW_BREAKPOINT,
119 QPU_SIG_NONE,
120 QPU_SIG_THREAD_SWITCH,
121 QPU_SIG_PROG_END,
122 QPU_SIG_WAIT_FOR_SCOREBOARD,
123 QPU_SIG_SCOREBOARD_UNLOCK,
124 QPU_SIG_LAST_THREAD_SWITCH,
125 QPU_SIG_COVERAGE_LOAD,
126 QPU_SIG_COLOR_LOAD,
127 QPU_SIG_COLOR_LOAD_END,
128 QPU_SIG_LOAD_TMU0,
129 QPU_SIG_LOAD_TMU1,
130 QPU_SIG_ALPHA_MASK_LOAD,
131 QPU_SIG_SMALL_IMM,
132 QPU_SIG_LOAD_IMM,
133 QPU_SIG_BRANCH
134};
135
136enum qpu_mux {
137 /* hardware mux values */
138 QPU_MUX_R0,
139 QPU_MUX_R1,
140 QPU_MUX_R2,
141 QPU_MUX_R3,
142 QPU_MUX_R4,
143 QPU_MUX_R5,
144 QPU_MUX_A,
145 QPU_MUX_B,
146
147 /* non-hardware mux values */
148 QPU_MUX_IMM,
149};
150
151enum qpu_cond {
152 QPU_COND_NEVER,
153 QPU_COND_ALWAYS,
154 QPU_COND_ZS,
155 QPU_COND_ZC,
156 QPU_COND_NS,
157 QPU_COND_NC,
158 QPU_COND_CS,
159 QPU_COND_CC,
160};
161
162enum qpu_pack_mul {
163 QPU_PACK_MUL_NOP,
164 /* replicated to each 8 bits of the 32-bit dst. */
165 QPU_PACK_MUL_8888 = 3,
166 QPU_PACK_MUL_8A,
167 QPU_PACK_MUL_8B,
168 QPU_PACK_MUL_8C,
169 QPU_PACK_MUL_8D,
170};
171
172enum qpu_pack_a {
173 QPU_PACK_A_NOP,
174 /* convert to 16 bit float if float input, or to int16. */
175 QPU_PACK_A_16A,
176 QPU_PACK_A_16B,
177 /* replicated to each 8 bits of the 32-bit dst. */
178 QPU_PACK_A_8888,
179 /* Convert to 8-bit unsigned int. */
180 QPU_PACK_A_8A,
181 QPU_PACK_A_8B,
182 QPU_PACK_A_8C,
183 QPU_PACK_A_8D,
184
185 /* Saturating variants of the previous instructions. */
186 QPU_PACK_A_32_SAT, /* int-only */
187 QPU_PACK_A_16A_SAT, /* int or float */
188 QPU_PACK_A_16B_SAT,
189 QPU_PACK_A_8888_SAT,
190 QPU_PACK_A_8A_SAT,
191 QPU_PACK_A_8B_SAT,
192 QPU_PACK_A_8C_SAT,
193 QPU_PACK_A_8D_SAT,
194};
195
196enum qpu_unpack_r4 {
197 QPU_UNPACK_R4_NOP,
198 QPU_UNPACK_R4_F16A_TO_F32,
199 QPU_UNPACK_R4_F16B_TO_F32,
200 QPU_UNPACK_R4_8D_REP,
201 QPU_UNPACK_R4_8A,
202 QPU_UNPACK_R4_8B,
203 QPU_UNPACK_R4_8C,
204 QPU_UNPACK_R4_8D,
205};
206
207#define QPU_MASK(high, low) \
208 ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low))
209
210#define QPU_GET_FIELD(word, field) \
211 ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
212
213#define QPU_SIG_SHIFT 60
214#define QPU_SIG_MASK QPU_MASK(63, 60)
215
216#define QPU_UNPACK_SHIFT 57
217#define QPU_UNPACK_MASK QPU_MASK(59, 57)
218
219/**
220 * If set, the pack field means PACK_MUL or R4 packing, instead of normal
221 * regfile a packing.
222 */
223#define QPU_PM ((uint64_t)1 << 56)
224
225#define QPU_PACK_SHIFT 52
226#define QPU_PACK_MASK QPU_MASK(55, 52)
227
228#define QPU_COND_ADD_SHIFT 49
229#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
230#define QPU_COND_MUL_SHIFT 46
231#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
232
233#define QPU_SF ((uint64_t)1 << 45)
234
235#define QPU_WADDR_ADD_SHIFT 38
236#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
237#define QPU_WADDR_MUL_SHIFT 32
238#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
239
240#define QPU_OP_MUL_SHIFT 29
241#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
242
243#define QPU_RADDR_A_SHIFT 18
244#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
245#define QPU_RADDR_B_SHIFT 12
246#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
247#define QPU_SMALL_IMM_SHIFT 12
248#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
249
250#define QPU_ADD_A_SHIFT 9
251#define QPU_ADD_A_MASK QPU_MASK(11, 9)
252#define QPU_ADD_B_SHIFT 6
253#define QPU_ADD_B_MASK QPU_MASK(8, 6)
254#define QPU_MUL_A_SHIFT 3
255#define QPU_MUL_A_MASK QPU_MASK(5, 3)
256#define QPU_MUL_B_SHIFT 0
257#define QPU_MUL_B_MASK QPU_MASK(2, 0)
258
259#define QPU_WS ((uint64_t)1 << 44)
260
261#define QPU_OP_ADD_SHIFT 24
262#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
263
264#endif /* VC4_QPU_DEFINES_H */
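As a usage sketch, decoding a field of a 64-bit QPU instruction is a mask-and-shift through QPU_GET_FIELD(); for example, a validator could spot the end of a shader from the signalling bits (illustrative helper, not part of this patch):

	static bool qpu_inst_is_prog_end(uint64_t inst)
	{
		/* Masks with QPU_SIG_MASK and shifts down by QPU_SIG_SHIFT,
		 * yielding a value from enum qpu_sig_bits.
		 */
		return QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_PROG_END;
	}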
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 9e4e904c668e..4e52a0a88551 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -154,7 +154,7 @@
154#define V3D_PCTRS14 0x006f4 154#define V3D_PCTRS14 0x006f4
155#define V3D_PCTR15 0x006f8 155#define V3D_PCTR15 0x006f8
156#define V3D_PCTRS15 0x006fc 156#define V3D_PCTRS15 0x006fc
157#define V3D_BGE 0x00f00 157#define V3D_DBGE 0x00f00
158#define V3D_FDBGO 0x00f04 158#define V3D_FDBGO 0x00f04
159#define V3D_FDBGB 0x00f08 159#define V3D_FDBGB 0x00f08
160#define V3D_FDBGR 0x00f0c 160#define V3D_FDBGR 0x00f0c
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
new file mode 100644
index 000000000000..8a2a312e2c1b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -0,0 +1,634 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Render command list generation
26 *
27 * In the VC4 driver, render command list generation is performed by the
28 * kernel instead of userspace. We do this because validating a
29 * user-submitted command list is hard to get right and has high CPU overhead,
30 * while the number of valid configurations for render command lists is
31 * actually fairly low.
32 */
33
34#include "uapi/drm/vc4_drm.h"
35#include "vc4_drv.h"
36#include "vc4_packet.h"
37
38struct vc4_rcl_setup {
39 struct drm_gem_cma_object *color_read;
40 struct drm_gem_cma_object *color_write;
41 struct drm_gem_cma_object *zs_read;
42 struct drm_gem_cma_object *zs_write;
43 struct drm_gem_cma_object *msaa_color_write;
44 struct drm_gem_cma_object *msaa_zs_write;
45
46 struct drm_gem_cma_object *rcl;
47 u32 next_offset;
48};
49
50static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
51{
52 *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
53 setup->next_offset += 1;
54}
55
56static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
57{
58 *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
59 setup->next_offset += 2;
60}
61
62static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
63{
64 *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
65 setup->next_offset += 4;
66}
67
68/*
69 * Emits a no-op STORE_TILE_BUFFER_GENERAL.
70 *
71 * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
72 * some sort before another load is triggered.
73 */
74static void vc4_store_before_load(struct vc4_rcl_setup *setup)
75{
76 rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
77 rcl_u16(setup,
78 VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
79 VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
80 VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
81 VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
82 VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
83 rcl_u32(setup, 0); /* no address, since we're in None mode */
84}
85
86/*
87 * Calculates the physical address of the start of a tile in a RCL surface.
88 *
89 * Unlike the other load/store packets,
90 * VC4_PACKET_LOAD/STORE_FULL_RES_TILE_BUFFER don't look at the tile
91 * coordinates packet, and instead just store to the address given.
92 */
93static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
94 struct drm_gem_cma_object *bo,
95 struct drm_vc4_submit_rcl_surface *surf,
96 uint8_t x, uint8_t y)
97{
98 return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
99 (DIV_ROUND_UP(exec->args->width, 32) * y + x);
100}
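As a worked example with illustrative numbers: for args->width == 256 there are DIV_ROUND_UP(256, 32) == 8 full-resolution tiles per row, so tile (x = 2, y = 1) resolves to bo->paddr + surf->offset + 10 * VC4_TILE_BUFFER_SIZE, i.e. 160KB past the start of the surface.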
101
102/*
103 * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
104 *
 105 * The tile coordinates packet triggers a pending load if there is one, is
 106 * used for clipping during rendering, and determines where loads/stores happen
107 * relative to their base address.
108 */
109static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
110 uint32_t x, uint32_t y)
111{
112 rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
113 rcl_u8(setup, x);
114 rcl_u8(setup, y);
115}
116
117static void emit_tile(struct vc4_exec_info *exec,
118 struct vc4_rcl_setup *setup,
119 uint8_t x, uint8_t y, bool first, bool last)
120{
121 struct drm_vc4_submit_cl *args = exec->args;
122 bool has_bin = args->bin_cl_size != 0;
123
124 /* Note that the load doesn't actually occur until the
125 * tile coords packet is processed, and only one load
126 * may be outstanding at a time.
127 */
128 if (setup->color_read) {
129 if (args->color_read.flags &
130 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
131 rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
132 rcl_u32(setup,
133 vc4_full_res_offset(exec, setup->color_read,
134 &args->color_read, x, y) |
135 VC4_LOADSTORE_FULL_RES_DISABLE_ZS);
136 } else {
137 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
138 rcl_u16(setup, args->color_read.bits);
139 rcl_u32(setup, setup->color_read->paddr +
140 args->color_read.offset);
141 }
142 }
143
144 if (setup->zs_read) {
145 if (args->zs_read.flags &
146 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
147 rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
148 rcl_u32(setup,
149 vc4_full_res_offset(exec, setup->zs_read,
150 &args->zs_read, x, y) |
151 VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
152 } else {
153 if (setup->color_read) {
154 /* Exec previous load. */
155 vc4_tile_coordinates(setup, x, y);
156 vc4_store_before_load(setup);
157 }
158
159 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
160 rcl_u16(setup, args->zs_read.bits);
161 rcl_u32(setup, setup->zs_read->paddr +
162 args->zs_read.offset);
163 }
164 }
165
166 /* Clipping depends on tile coordinates having been
167 * emitted, so we always need one here.
168 */
169 vc4_tile_coordinates(setup, x, y);
170
171 /* Wait for the binner before jumping to the first
172 * tile's lists.
173 */
174 if (first && has_bin)
175 rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
176
177 if (has_bin) {
178 rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
179 rcl_u32(setup, (exec->tile_bo->paddr +
180 exec->tile_alloc_offset +
181 (y * exec->bin_tiles_x + x) * 32));
182 }
183
184 if (setup->msaa_color_write) {
185 bool last_tile_write = (!setup->msaa_zs_write &&
186 !setup->zs_write &&
187 !setup->color_write);
188 uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_ZS;
189
190 if (!last_tile_write)
191 bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
192 else if (last)
193 bits |= VC4_LOADSTORE_FULL_RES_EOF;
194 rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
195 rcl_u32(setup,
196 vc4_full_res_offset(exec, setup->msaa_color_write,
197 &args->msaa_color_write, x, y) |
198 bits);
199 }
200
201 if (setup->msaa_zs_write) {
202 bool last_tile_write = (!setup->zs_write &&
203 !setup->color_write);
204 uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_COLOR;
205
206 if (setup->msaa_color_write)
207 vc4_tile_coordinates(setup, x, y);
208 if (!last_tile_write)
209 bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
210 else if (last)
211 bits |= VC4_LOADSTORE_FULL_RES_EOF;
212 rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
213 rcl_u32(setup,
214 vc4_full_res_offset(exec, setup->msaa_zs_write,
215 &args->msaa_zs_write, x, y) |
216 bits);
217 }
218
219 if (setup->zs_write) {
220 bool last_tile_write = !setup->color_write;
221
222 if (setup->msaa_color_write || setup->msaa_zs_write)
223 vc4_tile_coordinates(setup, x, y);
224
225 rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
226 rcl_u16(setup, args->zs_write.bits |
227 (last_tile_write ?
228 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
229 rcl_u32(setup,
230 (setup->zs_write->paddr + args->zs_write.offset) |
231 ((last && last_tile_write) ?
232 VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
233 }
234
235 if (setup->color_write) {
236 if (setup->msaa_color_write || setup->msaa_zs_write ||
237 setup->zs_write) {
238 vc4_tile_coordinates(setup, x, y);
239 }
240
241 if (last)
242 rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
243 else
244 rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
245 }
246}
247
248static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
249 struct vc4_rcl_setup *setup)
250{
251 struct drm_vc4_submit_cl *args = exec->args;
252 bool has_bin = args->bin_cl_size != 0;
253 uint8_t min_x_tile = args->min_x_tile;
254 uint8_t min_y_tile = args->min_y_tile;
255 uint8_t max_x_tile = args->max_x_tile;
256 uint8_t max_y_tile = args->max_y_tile;
257 uint8_t xtiles = max_x_tile - min_x_tile + 1;
258 uint8_t ytiles = max_y_tile - min_y_tile + 1;
259 uint8_t x, y;
260 uint32_t size, loop_body_size;
261
262 size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
263 loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
264
265 if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
266 size += VC4_PACKET_CLEAR_COLORS_SIZE +
267 VC4_PACKET_TILE_COORDINATES_SIZE +
268 VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
269 }
270
271 if (setup->color_read) {
272 if (args->color_read.flags &
273 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
274 loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
275 } else {
276 loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
277 }
278 }
279 if (setup->zs_read) {
280 if (args->zs_read.flags &
281 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
282 loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
283 } else {
284 if (setup->color_read &&
285 !(args->color_read.flags &
286 VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) {
287 loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
288 loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
289 }
290 loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
291 }
292 }
293
294 if (has_bin) {
295 size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
296 loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
297 }
298
299 if (setup->msaa_color_write)
300 loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
301 if (setup->msaa_zs_write)
302 loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
303
304 if (setup->zs_write)
305 loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
306 if (setup->color_write)
307 loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
308
309 /* We need a VC4_PACKET_TILE_COORDINATES in between each store. */
310 loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE *
311 ((setup->msaa_color_write != NULL) +
312 (setup->msaa_zs_write != NULL) +
313 (setup->color_write != NULL) +
314 (setup->zs_write != NULL) - 1);
315
316 size += xtiles * ytiles * loop_body_size;
317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl)
320 return -ENOMEM;
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list);
323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no
336 * writes) so that we trigger the tile buffer clear.
337 */
338 if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
339 rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
340 rcl_u32(setup, args->clear_color[0]);
341 rcl_u32(setup, args->clear_color[1]);
342 rcl_u32(setup, args->clear_z);
343 rcl_u8(setup, args->clear_s);
344
345 vc4_tile_coordinates(setup, 0, 0);
346
347 rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
348 rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
349 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 }
351
352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile);
355 bool last = (x == max_x_tile && y == max_y_tile);
356
357 emit_tile(exec, setup, x, y, first, last);
358 }
359 }
360
361 BUG_ON(setup->next_offset != size);
362 exec->ct1ca = setup->rcl->paddr;
363 exec->ct1ea = setup->rcl->paddr + setup->next_offset;
364
365 return 0;
366}
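To see the bookkeeping concretely: for a submit with a bin CL and only a color write, loop_body_size is VC4_PACKET_TILE_COORDINATES_SIZE (3) + VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE (5) + VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE (1) = 9 bytes per tile, on top of an 11-byte render config and a 1-byte semaphore wait up front; the BUG_ON() above then verifies that the emit loop produced exactly the byte count predicted here.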
367
368static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
369 struct drm_gem_cma_object *obj,
370 struct drm_vc4_submit_rcl_surface *surf)
371{
372 struct drm_vc4_submit_cl *args = exec->args;
373 u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
374
375 if (surf->offset > obj->base.size) {
376 DRM_ERROR("surface offset %d > BO size %zd\n",
377 surf->offset, obj->base.size);
378 return -EINVAL;
379 }
380
381 if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
382 render_tiles_stride * args->max_y_tile + args->max_x_tile) {
383 DRM_ERROR("MSAA tile %d, %d out of bounds "
384 "(bo size %zd, offset %d).\n",
385 args->max_x_tile, args->max_y_tile,
386 obj->base.size,
387 surf->offset);
388 return -EINVAL;
389 }
390
391 return 0;
392}
393
394static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
395 struct drm_gem_cma_object **obj,
396 struct drm_vc4_submit_rcl_surface *surf)
397{
398 if (surf->flags != 0 || surf->bits != 0) {
399 DRM_ERROR("MSAA surface had nonzero flags/bits\n");
400 return -EINVAL;
401 }
402
403 if (surf->hindex == ~0)
404 return 0;
405
406 *obj = vc4_use_bo(exec, surf->hindex);
407 if (!*obj)
408 return -EINVAL;
409
410 if (surf->offset & 0xf) {
411 DRM_ERROR("MSAA write must be 16b aligned.\n");
412 return -EINVAL;
413 }
414
415 return vc4_full_res_bounds_check(exec, *obj, surf);
416}
417
418static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
419 struct drm_gem_cma_object **obj,
420 struct drm_vc4_submit_rcl_surface *surf)
421{
422 uint8_t tiling = VC4_GET_FIELD(surf->bits,
423 VC4_LOADSTORE_TILE_BUFFER_TILING);
424 uint8_t buffer = VC4_GET_FIELD(surf->bits,
425 VC4_LOADSTORE_TILE_BUFFER_BUFFER);
426 uint8_t format = VC4_GET_FIELD(surf->bits,
427 VC4_LOADSTORE_TILE_BUFFER_FORMAT);
428 int cpp;
429 int ret;
430
431 if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
432 DRM_ERROR("Extra flags set\n");
433 return -EINVAL;
434 }
435
436 if (surf->hindex == ~0)
437 return 0;
438
439 *obj = vc4_use_bo(exec, surf->hindex);
440 if (!*obj)
441 return -EINVAL;
442
443 if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
444 if (surf == &exec->args->zs_write) {
445 DRM_ERROR("general zs write may not be a full-res.\n");
446 return -EINVAL;
447 }
448
449 if (surf->bits != 0) {
450 DRM_ERROR("load/store general bits set with "
451 "full res load/store.\n");
452 return -EINVAL;
453 }
454
455 ret = vc4_full_res_bounds_check(exec, *obj, surf);
 456 if (ret)
457 return ret;
458
459 return 0;
460 }
461
462 if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
463 VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
464 VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
465 DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
466 surf->bits);
467 return -EINVAL;
468 }
469
470 if (tiling > VC4_TILING_FORMAT_LT) {
471 DRM_ERROR("Bad tiling format\n");
472 return -EINVAL;
473 }
474
475 if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
476 if (format != 0) {
477 DRM_ERROR("No color format should be set for ZS\n");
478 return -EINVAL;
479 }
480 cpp = 4;
481 } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
482 switch (format) {
483 case VC4_LOADSTORE_TILE_BUFFER_BGR565:
484 case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
485 cpp = 2;
486 break;
487 case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
488 cpp = 4;
489 break;
490 default:
491 DRM_ERROR("Bad tile buffer format\n");
492 return -EINVAL;
493 }
494 } else {
495 DRM_ERROR("Bad load/store buffer %d.\n", buffer);
496 return -EINVAL;
497 }
498
499 if (surf->offset & 0xf) {
500 DRM_ERROR("load/store buffer must be 16b aligned.\n");
501 return -EINVAL;
502 }
503
504 if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
505 exec->args->width, exec->args->height, cpp)) {
506 return -EINVAL;
507 }
508
509 return 0;
510}
511
512static int
513vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
514 struct vc4_rcl_setup *setup,
515 struct drm_gem_cma_object **obj,
516 struct drm_vc4_submit_rcl_surface *surf)
517{
518 uint8_t tiling = VC4_GET_FIELD(surf->bits,
519 VC4_RENDER_CONFIG_MEMORY_FORMAT);
520 uint8_t format = VC4_GET_FIELD(surf->bits,
521 VC4_RENDER_CONFIG_FORMAT);
522 int cpp;
523
524 if (surf->flags != 0) {
525 DRM_ERROR("No flags supported on render config.\n");
526 return -EINVAL;
527 }
528
529 if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
530 VC4_RENDER_CONFIG_FORMAT_MASK |
531 VC4_RENDER_CONFIG_MS_MODE_4X |
532 VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
533 DRM_ERROR("Unknown bits in render config: 0x%04x\n",
534 surf->bits);
535 return -EINVAL;
536 }
537
538 if (surf->hindex == ~0)
539 return 0;
540
541 *obj = vc4_use_bo(exec, surf->hindex);
542 if (!*obj)
543 return -EINVAL;
544
545 if (tiling > VC4_TILING_FORMAT_LT) {
546 DRM_ERROR("Bad tiling format\n");
547 return -EINVAL;
548 }
549
550 switch (format) {
551 case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
552 case VC4_RENDER_CONFIG_FORMAT_BGR565:
553 cpp = 2;
554 break;
555 case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
556 cpp = 4;
557 break;
558 default:
559 DRM_ERROR("Bad tile buffer format\n");
560 return -EINVAL;
561 }
562
563 if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
564 exec->args->width, exec->args->height, cpp)) {
565 return -EINVAL;
566 }
567
568 return 0;
569}
570
571int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
572{
573 struct vc4_rcl_setup setup = {0};
574 struct drm_vc4_submit_cl *args = exec->args;
575 bool has_bin = args->bin_cl_size != 0;
576 int ret;
577
578 if (args->min_x_tile > args->max_x_tile ||
579 args->min_y_tile > args->max_y_tile) {
580 DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
581 args->min_x_tile, args->min_y_tile,
582 args->max_x_tile, args->max_y_tile);
583 return -EINVAL;
584 }
585
586 if (has_bin &&
587 (args->max_x_tile > exec->bin_tiles_x ||
588 args->max_y_tile > exec->bin_tiles_y)) {
589 DRM_ERROR("Render tiles (%d,%d) outside of bin config "
590 "(%d,%d)\n",
591 args->max_x_tile, args->max_y_tile,
592 exec->bin_tiles_x, exec->bin_tiles_y);
593 return -EINVAL;
594 }
595
596 ret = vc4_rcl_render_config_surface_setup(exec, &setup,
597 &setup.color_write,
598 &args->color_write);
599 if (ret)
600 return ret;
601
602 ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
603 if (ret)
604 return ret;
605
606 ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
607 if (ret)
608 return ret;
609
610 ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
611 if (ret)
612 return ret;
613
614 ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_color_write,
615 &args->msaa_color_write);
616 if (ret)
617 return ret;
618
619 ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_zs_write,
620 &args->msaa_zs_write);
621 if (ret)
622 return ret;
623
624 /* We shouldn't even have the job submitted to us if there's no
625 * surface to write out.
626 */
627 if (!setup.color_write && !setup.zs_write &&
628 !setup.msaa_color_write && !setup.msaa_zs_write) {
629 DRM_ERROR("RCL requires color or Z/S write\n");
630 return -EINVAL;
631 }
632
633 return vc4_create_rcl_bo(dev, exec, &setup);
634}
diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h
new file mode 100644
index 000000000000..ad7b1ea720c2
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
10#define _VC4_TRACE_H_
11
12#include <linux/stringify.h>
13#include <linux/types.h>
14#include <linux/tracepoint.h>
15
16#undef TRACE_SYSTEM
17#define TRACE_SYSTEM vc4
18#define TRACE_INCLUDE_FILE vc4_trace
19
20TRACE_EVENT(vc4_wait_for_seqno_begin,
21 TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
22 TP_ARGS(dev, seqno, timeout),
23
24 TP_STRUCT__entry(
25 __field(u32, dev)
26 __field(u64, seqno)
27 __field(u64, timeout)
28 ),
29
30 TP_fast_assign(
31 __entry->dev = dev->primary->index;
32 __entry->seqno = seqno;
33 __entry->timeout = timeout;
34 ),
35
36 TP_printk("dev=%u, seqno=%llu, timeout=%llu",
37 __entry->dev, __entry->seqno, __entry->timeout)
38);
39
40TRACE_EVENT(vc4_wait_for_seqno_end,
41 TP_PROTO(struct drm_device *dev, uint64_t seqno),
42 TP_ARGS(dev, seqno),
43
44 TP_STRUCT__entry(
45 __field(u32, dev)
46 __field(u64, seqno)
47 ),
48
49 TP_fast_assign(
50 __entry->dev = dev->primary->index;
51 __entry->seqno = seqno;
52 ),
53
54 TP_printk("dev=%u, seqno=%llu",
55 __entry->dev, __entry->seqno)
56);
57
58#endif /* _VC4_TRACE_H_ */
59
60/* This part must be outside protection */
61#undef TRACE_INCLUDE_PATH
62#define TRACE_INCLUDE_PATH .
63#include <trace/define_trace.h>
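With CREATE_TRACE_POINTS defined (see vc4_trace_points.c below), these TRACE_EVENT() definitions expand into callable trace_vc4_wait_for_seqno_begin()/trace_vc4_wait_for_seqno_end() helpers. A sketch of the intended bracketing around a seqno wait; the real call sites live in the GEM code's wait path, not shown here:

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	/* ... block until the GPU's finished seqno passes "seqno" ... */
	trace_vc4_wait_for_seqno_end(dev, seqno);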
diff --git a/drivers/gpu/drm/vc4/vc4_trace_points.c b/drivers/gpu/drm/vc4/vc4_trace_points.c
new file mode 100644
index 000000000000..e6278f25716b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "vc4_drv.h"
10
11#ifndef __CHECKER__
12#define CREATE_TRACE_POINTS
13#include "vc4_trace.h"
14#endif
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
new file mode 100644
index 000000000000..424d515ffcda
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "linux/component.h"
20#include "vc4_drv.h"
21#include "vc4_regs.h"
22
23#ifdef CONFIG_DEBUG_FS
24#define REGDEF(reg) { reg, #reg }
25static const struct {
26 uint32_t reg;
27 const char *name;
28} vc4_reg_defs[] = {
29 REGDEF(V3D_IDENT0),
30 REGDEF(V3D_IDENT1),
31 REGDEF(V3D_IDENT2),
32 REGDEF(V3D_SCRATCH),
33 REGDEF(V3D_L2CACTL),
34 REGDEF(V3D_SLCACTL),
35 REGDEF(V3D_INTCTL),
36 REGDEF(V3D_INTENA),
37 REGDEF(V3D_INTDIS),
38 REGDEF(V3D_CT0CS),
39 REGDEF(V3D_CT1CS),
40 REGDEF(V3D_CT0EA),
41 REGDEF(V3D_CT1EA),
42 REGDEF(V3D_CT0CA),
43 REGDEF(V3D_CT1CA),
44 REGDEF(V3D_CT00RA0),
45 REGDEF(V3D_CT01RA0),
46 REGDEF(V3D_CT0LC),
47 REGDEF(V3D_CT1LC),
48 REGDEF(V3D_CT0PC),
49 REGDEF(V3D_CT1PC),
50 REGDEF(V3D_PCS),
51 REGDEF(V3D_BFC),
52 REGDEF(V3D_RFC),
53 REGDEF(V3D_BPCA),
54 REGDEF(V3D_BPCS),
55 REGDEF(V3D_BPOA),
56 REGDEF(V3D_BPOS),
57 REGDEF(V3D_BXCF),
58 REGDEF(V3D_SQRSV0),
59 REGDEF(V3D_SQRSV1),
60 REGDEF(V3D_SQCNTL),
61 REGDEF(V3D_SRQPC),
62 REGDEF(V3D_SRQUA),
63 REGDEF(V3D_SRQUL),
64 REGDEF(V3D_SRQCS),
65 REGDEF(V3D_VPACNTL),
66 REGDEF(V3D_VPMBASE),
67 REGDEF(V3D_PCTRC),
68 REGDEF(V3D_PCTRE),
69 REGDEF(V3D_PCTR0),
70 REGDEF(V3D_PCTRS0),
71 REGDEF(V3D_PCTR1),
72 REGDEF(V3D_PCTRS1),
73 REGDEF(V3D_PCTR2),
74 REGDEF(V3D_PCTRS2),
75 REGDEF(V3D_PCTR3),
76 REGDEF(V3D_PCTRS3),
77 REGDEF(V3D_PCTR4),
78 REGDEF(V3D_PCTRS4),
79 REGDEF(V3D_PCTR5),
80 REGDEF(V3D_PCTRS5),
81 REGDEF(V3D_PCTR6),
82 REGDEF(V3D_PCTRS6),
83 REGDEF(V3D_PCTR7),
84 REGDEF(V3D_PCTRS7),
85 REGDEF(V3D_PCTR8),
86 REGDEF(V3D_PCTRS8),
87 REGDEF(V3D_PCTR9),
88 REGDEF(V3D_PCTRS9),
89 REGDEF(V3D_PCTR10),
90 REGDEF(V3D_PCTRS10),
91 REGDEF(V3D_PCTR11),
92 REGDEF(V3D_PCTRS11),
93 REGDEF(V3D_PCTR12),
94 REGDEF(V3D_PCTRS12),
95 REGDEF(V3D_PCTR13),
96 REGDEF(V3D_PCTRS13),
97 REGDEF(V3D_PCTR14),
98 REGDEF(V3D_PCTRS14),
99 REGDEF(V3D_PCTR15),
100 REGDEF(V3D_PCTRS15),
101 REGDEF(V3D_DBGE),
102 REGDEF(V3D_FDBGO),
103 REGDEF(V3D_FDBGB),
104 REGDEF(V3D_FDBGR),
105 REGDEF(V3D_FDBGS),
106 REGDEF(V3D_ERRSTAT),
107};
108
109int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
110{
111 struct drm_info_node *node = (struct drm_info_node *)m->private;
112 struct drm_device *dev = node->minor->dev;
113 struct vc4_dev *vc4 = to_vc4_dev(dev);
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
117 seq_printf(m, "%s (0x%04x): 0x%08x\n",
118 vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
119 V3D_READ(vc4_reg_defs[i].reg));
120 }
121
122 return 0;
123}
124
125int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
126{
127 struct drm_info_node *node = (struct drm_info_node *)m->private;
128 struct drm_device *dev = node->minor->dev;
129 struct vc4_dev *vc4 = to_vc4_dev(dev);
130 uint32_t ident1 = V3D_READ(V3D_IDENT1);
131 uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
132 uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
133 uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
134
135 seq_printf(m, "Revision: %d\n",
136 VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
137 seq_printf(m, "Slices: %d\n", nslc);
138 seq_printf(m, "TMUs: %d\n", nslc * tups);
139 seq_printf(m, "QPUs: %d\n", nslc * qups);
140 seq_printf(m, "Semaphores: %d\n",
141 VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
142
143 return 0;
144}
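For example, on the Raspberry Pi's VC4 (3 slices of 4 QPUs each, per the public VideoCore IV documentation) this reports Slices: 3 and QPUs: 12, with the TMU count likewise scaled up from the per-slice TUPS field.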
145#endif /* CONFIG_DEBUG_FS */
146
147/*
 148 * Asks the firmware to turn power to the V3D engine on or off.
 149 *
 150 * This may be doable with just the clocks interface, though the firmware
 151 * packet does some other register setup as well.
152 */
153int
154vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
155{
 156 if (on)
 157 return pm_generic_resume(&vc4->v3d->pdev->dev);
 158 else
 159 return pm_generic_poweroff(&vc4->v3d->pdev->dev);
160}
161
162static void vc4_v3d_init_hw(struct drm_device *dev)
163{
164 struct vc4_dev *vc4 = to_vc4_dev(dev);
165
166 /* Take all the memory that would have been reserved for user
167 * QPU programs, since we don't have an interface for running
168 * them, anyway.
169 */
170 V3D_WRITE(V3D_VPMBASE, 0);
171}
172
173static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
174{
175 struct platform_device *pdev = to_platform_device(dev);
176 struct drm_device *drm = dev_get_drvdata(master);
177 struct vc4_dev *vc4 = to_vc4_dev(drm);
178 struct vc4_v3d *v3d = NULL;
179 int ret;
180
181 v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
182 if (!v3d)
183 return -ENOMEM;
184
185 v3d->pdev = pdev;
186
187 v3d->regs = vc4_ioremap_regs(pdev, 0);
188 if (IS_ERR(v3d->regs))
189 return PTR_ERR(v3d->regs);
190
191 vc4->v3d = v3d;
192
193 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
194 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
195 V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
196 return -EINVAL;
197 }
198
199 /* Reset the binner overflow address/size at setup, to be sure
200 * we don't reuse an old one.
201 */
202 V3D_WRITE(V3D_BPOA, 0);
203 V3D_WRITE(V3D_BPOS, 0);
204
205 vc4_v3d_init_hw(drm);
206
207 ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
208 if (ret) {
209 DRM_ERROR("Failed to install IRQ handler\n");
210 return ret;
211 }
212
213 return 0;
214}
215
216static void vc4_v3d_unbind(struct device *dev, struct device *master,
217 void *data)
218{
219 struct drm_device *drm = dev_get_drvdata(master);
220 struct vc4_dev *vc4 = to_vc4_dev(drm);
221
222 drm_irq_uninstall(drm);
223
224 /* Disable the binner's overflow memory address, so the next
225 * driver probe (if any) doesn't try to reuse our old
226 * allocation.
227 */
228 V3D_WRITE(V3D_BPOA, 0);
229 V3D_WRITE(V3D_BPOS, 0);
230
231 vc4->v3d = NULL;
232}
233
234static const struct component_ops vc4_v3d_ops = {
235 .bind = vc4_v3d_bind,
236 .unbind = vc4_v3d_unbind,
237};
238
239static int vc4_v3d_dev_probe(struct platform_device *pdev)
240{
241 return component_add(&pdev->dev, &vc4_v3d_ops);
242}
243
244static int vc4_v3d_dev_remove(struct platform_device *pdev)
245{
246 component_del(&pdev->dev, &vc4_v3d_ops);
247 return 0;
248}
249
250static const struct of_device_id vc4_v3d_dt_match[] = {
251 { .compatible = "brcm,vc4-v3d" },
252 {}
253};
254
255struct platform_driver vc4_v3d_driver = {
256 .probe = vc4_v3d_dev_probe,
257 .remove = vc4_v3d_dev_remove,
258 .driver = {
259 .name = "vc4_v3d",
260 .of_match_table = vc4_v3d_dt_match,
261 },
262};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
new file mode 100644
index 000000000000..0fb5b994b9dd
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -0,0 +1,900 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/**
25 * Command list validator for VC4.
26 *
27 * The VC4 has no IOMMU between it and system memory. So, a user with
28 * access to execute command lists could escalate privilege by
29 * overwriting system memory (drawing to it as a framebuffer) or
30 * reading system memory it shouldn't (reading it as a texture, or
31 * uniform data, or vertex data).
32 *
33 * This validates command lists to ensure that all accesses are within
34 * the bounds of the GEM objects referenced. It explicitly whitelists
35 * packets, and looks at the offsets in any address fields to make
36 * sure they're constrained within the BOs they reference.
37 *
38 * Note that because of the validation that's happening anyway, this
39 * is where GEM relocation processing happens.
40 */
41
42#include "uapi/drm/vc4_drm.h"
43#include "vc4_drv.h"
44#include "vc4_packet.h"
45
46#define VALIDATE_ARGS \
47 struct vc4_exec_info *exec, \
48 void *validated, \
49 void *untrusted
50
51/** Return the width in pixels of a 64-byte microtile. */
52static uint32_t
53utile_width(int cpp)
54{
55 switch (cpp) {
56 case 1:
57 case 2:
58 return 8;
59 case 4:
60 return 4;
61 case 8:
62 return 2;
63 default:
64 DRM_ERROR("unknown cpp: %d\n", cpp);
65 return 1;
66 }
67}
68
69/** Return the height in pixels of a 64-byte microtile. */
70static uint32_t
71utile_height(int cpp)
72{
73 switch (cpp) {
74 case 1:
75 return 8;
76 case 2:
77 case 4:
78 case 8:
79 return 4;
80 default:
81 DRM_ERROR("unknown cpp: %d\n", cpp);
82 return 1;
83 }
84}
85
86/**
87 * The texture unit decides what tiling format a particular miplevel is
88 * using based on this test, so we lay out our miptrees accordingly.
89 */
90static bool
91size_is_lt(uint32_t width, uint32_t height, int cpp)
92{
93 return (width <= 4 * utile_width(cpp) ||
94 height <= 4 * utile_height(cpp));
95}
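/* Illustrative note (numbers worked out here, not in the original source):
 * since a microtile is always 64 bytes, utile_width()/utile_height() pair
 * up as 8x8 (cpp=1), 8x4 (cpp=2), 4x4 (cpp=4) and 2x4 (cpp=8) pixels.
 * The LT cutoff above is four utiles per axis, so for a 32bpp (cpp=4)
 * texture:
 *
 *     size_is_lt(16, 1024, 4) -> true   (width <= 4 * 4)
 *     size_is_lt(64, 64, 4)   -> false  (both axes > 16)
 */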
96
97struct drm_gem_cma_object *
98vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
99{
100 struct drm_gem_cma_object *obj;
101 struct vc4_bo *bo;
102
103 if (hindex >= exec->bo_count) {
104 DRM_ERROR("BO index %d greater than BO count %d\n",
105 hindex, exec->bo_count);
106 return NULL;
107 }
108 obj = exec->bo[hindex];
109 bo = to_vc4_bo(&obj->base);
110
111 if (bo->validated_shader) {
112 DRM_ERROR("Trying to use shader BO as something other than "
113 "a shader\n");
114 return NULL;
115 }
116
117 return obj;
118}
119
120static struct drm_gem_cma_object *
121vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
122{
123 return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
124}
125
126static bool
127validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
128{
129 /* Note that the untrusted pointer passed to these functions is
130 * incremented past the packet byte.
131 */
132 return (untrusted - 1 == exec->bin_u + pos);
133}
134
135static uint32_t
136gl_shader_rec_size(uint32_t pointer_bits)
137{
138 uint32_t attribute_count = pointer_bits & 7;
139 bool extended = pointer_bits & 8;
140
141 if (attribute_count == 0)
142 attribute_count = 8;
143
144 if (extended)
145 return 100 + attribute_count * 4;
146 else
147 return 36 + attribute_count * 8;
148}
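/* Worked example (illustrative): the low nibble of the shader rec pointer
 * encodes the record layout: bits 0-2 are the attribute count (0 meaning
 * 8) and bit 3 selects the extended-stride format.  So pointer_bits == 0x3
 * gives 36 + 3 * 8 = 60 bytes, while pointer_bits == 0x8 gives the
 * extended form with all 8 attributes: 100 + 8 * 4 = 132 bytes.
 */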
149
150bool
151vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
152 uint32_t offset, uint8_t tiling_format,
153 uint32_t width, uint32_t height, uint8_t cpp)
154{
155 uint32_t aligned_width, aligned_height, stride, size;
156 uint32_t utile_w = utile_width(cpp);
157 uint32_t utile_h = utile_height(cpp);
158
159 /* The shaded vertex format stores signed 12.4 fixed point
160 * (-2048,2047) offsets from the viewport center, so we should
161 * never have a render target larger than 4096. The texture
162 * unit can only sample from 2048x2048, so it's even more
163 * restricted. This lets us avoid worrying about overflow in
164 * our math.
165 */
166 if (width > 4096 || height > 4096) {
167 DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
168 return false;
169 }
170
171 switch (tiling_format) {
172 case VC4_TILING_FORMAT_LINEAR:
173 aligned_width = round_up(width, utile_w);
174 aligned_height = height;
175 break;
176 case VC4_TILING_FORMAT_T:
177 aligned_width = round_up(width, utile_w * 8);
178 aligned_height = round_up(height, utile_h * 8);
179 break;
180 case VC4_TILING_FORMAT_LT:
181 aligned_width = round_up(width, utile_w);
182 aligned_height = round_up(height, utile_h);
183 break;
184 default:
185 DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
186 return false;
187 }
188
189 stride = aligned_width * cpp;
190 size = stride * aligned_height;
191
192 if (size + offset < size ||
193 size + offset > fbo->base.size) {
194 DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
195 width, height,
196 aligned_width, aligned_height,
197 size, offset, fbo->base.size);
198 return false;
199 }
200
201 return true;
202}
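/* Worked example (illustrative): a 1000x600 cpp=4 surface in T format
 * uses 4x4-pixel utiles, so both axes round up to 32-pixel (8-utile)
 * boundaries: 1024x608.  That gives stride = 1024 * 4 = 4096 bytes and
 * size = 4096 * 608 = 2,490,368 bytes, which (plus the start offset) must
 * fit within fbo->base.size for the packet to validate.
 */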
203
204static int
205validate_flush(VALIDATE_ARGS)
206{
207 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
208 DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
209 return -EINVAL;
210 }
211 exec->found_flush = true;
212
213 return 0;
214}
215
216static int
217validate_start_tile_binning(VALIDATE_ARGS)
218{
219 if (exec->found_start_tile_binning_packet) {
220 DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
221 return -EINVAL;
222 }
223 exec->found_start_tile_binning_packet = true;
224
225 if (!exec->found_tile_binning_mode_config_packet) {
226 DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
227 return -EINVAL;
228 }
229
230 return 0;
231}
232
233static int
234validate_increment_semaphore(VALIDATE_ARGS)
235{
236 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
237 DRM_ERROR("Bin CL must end with "
238 "VC4_PACKET_INCREMENT_SEMAPHORE\n");
239 return -EINVAL;
240 }
241 exec->found_increment_semaphore_packet = true;
242
243 return 0;
244}
245
246static int
247validate_indexed_prim_list(VALIDATE_ARGS)
248{
249 struct drm_gem_cma_object *ib;
250 uint32_t length = *(uint32_t *)(untrusted + 1);
251 uint32_t offset = *(uint32_t *)(untrusted + 5);
252 uint32_t max_index = *(uint32_t *)(untrusted + 9);
253 uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
254 struct vc4_shader_state *shader_state;
255
256 /* A shader state record must precede any primitives. */
257 if (exec->shader_state_count == 0) {
258 DRM_ERROR("shader state must precede primitives\n");
259 return -EINVAL;
260 }
261 shader_state = &exec->shader_state[exec->shader_state_count - 1];
262
263 if (max_index > shader_state->max_index)
264 shader_state->max_index = max_index;
265
266 ib = vc4_use_handle(exec, 0);
267 if (!ib)
268 return -EINVAL;
269
270 if (offset > ib->base.size ||
271 (ib->base.size - offset) / index_size < length) {
272 DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
273 offset, length, index_size, ib->base.size);
274 return -EINVAL;
275 }
276
277 *(uint32_t *)(validated + 5) = ib->paddr + offset;
278
279 return 0;
280}
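/* Note on the bounds check above: it is written as
 * "(size - offset) / index_size < length" rather than the more obvious
 * "offset + length * index_size > size" so that a huge userspace-supplied
 * length cannot wrap the 32-bit multiply and slip past the check.
 */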
281
282static int
283validate_gl_array_primitive(VALIDATE_ARGS)
284{
285 uint32_t length = *(uint32_t *)(untrusted + 1);
286 uint32_t base_index = *(uint32_t *)(untrusted + 5);
287 uint32_t max_index;
288 struct vc4_shader_state *shader_state;
289
290 /* A shader state record must precede any primitives. */
291 if (exec->shader_state_count == 0) {
292 DRM_ERROR("shader state must precede primitives\n");
293 return -EINVAL;
294 }
295 shader_state = &exec->shader_state[exec->shader_state_count - 1];
296
297 if (length + base_index < length) {
298 DRM_ERROR("primitive vertex count overflow\n");
299 return -EINVAL;
300 }
301 max_index = length + base_index - 1;
302
303 if (max_index > shader_state->max_index)
304 shader_state->max_index = max_index;
305
306 return 0;
307}
308
309static int
310validate_gl_shader_state(VALIDATE_ARGS)
311{
312 uint32_t i = exec->shader_state_count++;
313
314 if (i >= exec->shader_state_size) {
315 DRM_ERROR("More requests for shader states than declared\n");
316 return -EINVAL;
317 }
318
319 exec->shader_state[i].addr = *(uint32_t *)untrusted;
320 exec->shader_state[i].max_index = 0;
321
322 if (exec->shader_state[i].addr & ~0xf) {
323 DRM_ERROR("high bits set in GL shader rec reference\n");
324 return -EINVAL;
325 }
326
327 *(uint32_t *)validated = (exec->shader_rec_p +
328 exec->shader_state[i].addr);
329
330 exec->shader_rec_p +=
331 roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
332
333 return 0;
334}
335
336static int
337validate_tile_binning_config(VALIDATE_ARGS)
338{
339 struct drm_device *dev = exec->exec_bo->base.dev;
340 struct vc4_bo *tile_bo;
341 uint8_t flags;
342 uint32_t tile_state_size, tile_alloc_size;
343 uint32_t tile_count;
344
345 if (exec->found_tile_binning_mode_config_packet) {
346 DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
347 return -EINVAL;
348 }
349 exec->found_tile_binning_mode_config_packet = true;
350
351 exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
352 exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
353 tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
354 flags = *(uint8_t *)(untrusted + 14);
355
356 if (exec->bin_tiles_x == 0 ||
357 exec->bin_tiles_y == 0) {
358 DRM_ERROR("Tile binning config of %dx%d too small\n",
359 exec->bin_tiles_x, exec->bin_tiles_y);
360 return -EINVAL;
361 }
362
363 if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
364 VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
365 DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
366 return -EINVAL;
367 }
368
369 /* The tile state data array is 48 bytes per tile, and we put it at
370 * the start of a BO containing both it and the tile alloc.
371 */
372 tile_state_size = 48 * tile_count;
373
374 /* The tile alloc array follows the tile state, so page-align its start. */
375 exec->tile_alloc_offset = roundup(tile_state_size, 4096);
376
377 *(uint8_t *)(validated + 14) =
378 ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
379 VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
380 VC4_BIN_CONFIG_AUTO_INIT_TSDA |
381 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
382 VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
383 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
384 VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
385
386 /* Initial block size. */
387 tile_alloc_size = 32 * tile_count;
388
389 /*
390 * The initial allocation gets rounded to the next 256 bytes before
391 * the hardware starts fulfilling further allocations.
392 */
393 tile_alloc_size = roundup(tile_alloc_size, 256);
394
395 /* Add space for the extra allocations. This is what gets used first,
396 * before overflow memory. It must have at least 4096 bytes, but we
397 * want to avoid overflow memory usage if possible.
398 */
399 tile_alloc_size += 1024 * 1024;
400
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true);
403 if (!tile_bo)
404 return -ENOMEM;
405 exec->tile_bo = &tile_bo->base;
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407
408 /* tile alloc address. */
409 *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
410 exec->tile_alloc_offset);
411 /* tile alloc size. */
412 *(uint32_t *)(validated + 4) = tile_alloc_size;
413 /* tile state address. */
414 *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
415
416 return 0;
417}
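/* Worked example (illustrative, assuming the hardware's 64x64-pixel
 * binning tiles): a 1920x1080 render bins as 30x17 = 510 tiles.  Then
 * tile_state_size = 48 * 510 = 24,480 bytes, so the tile alloc starts at
 * the page-aligned offset 24,576; the initial alloc is
 * roundup(32 * 510, 256) = 16,384 bytes plus the 1 MiB of slack, for a
 * tile BO of 24,576 + 1,064,960 = 1,089,536 bytes total.
 */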
418
419static int
420validate_gem_handles(VALIDATE_ARGS)
421{
422 memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
423 return 0;
424}
425
426#define VC4_DEFINE_PACKET(packet, func) \
427 [packet] = { packet ## _SIZE, #packet, func }
428
429static const struct cmd_info {
430 uint16_t len;
431 const char *name;
432 int (*func)(struct vc4_exec_info *exec, void *validated,
433 void *untrusted);
434} cmd_info[] = {
435 VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL),
436 VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL),
437 VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush),
438 VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL),
439 VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING,
440 validate_start_tile_binning),
441 VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE,
442 validate_increment_semaphore),
443
444 VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE,
445 validate_indexed_prim_list),
446 VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE,
447 validate_gl_array_primitive),
448
449 VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL),
450
451 VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state),
452
453 VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL),
454 VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL),
455 VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL),
456 VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL),
457 VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL),
458 VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL),
459 VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL),
460 VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL),
461 VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL),
462 /* Note: The docs say this was also 105, but it was 106 in the
463 * initial userland code drop.
464 */
465 VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL),
466
467 VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG,
468 validate_tile_binning_config),
469
470 VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles),
471};
472
473int
474vc4_validate_bin_cl(struct drm_device *dev,
475 void *validated,
476 void *unvalidated,
477 struct vc4_exec_info *exec)
478{
479 uint32_t len = exec->args->bin_cl_size;
480 uint32_t dst_offset = 0;
481 uint32_t src_offset = 0;
482
483 while (src_offset < len) {
484 void *dst_pkt = validated + dst_offset;
485 void *src_pkt = unvalidated + src_offset;
486 u8 cmd = *(uint8_t *)src_pkt;
487 const struct cmd_info *info;
488
489 if (cmd >= ARRAY_SIZE(cmd_info)) {
490 DRM_ERROR("0x%08x: packet %d out of bounds\n",
491 src_offset, cmd);
492 return -EINVAL;
493 }
494
495 info = &cmd_info[cmd];
496 if (!info->name) {
497 DRM_ERROR("0x%08x: packet %d invalid\n",
498 src_offset, cmd);
499 return -EINVAL;
500 }
501
502 if (src_offset + info->len > len) {
503 DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
504 "exceeds bounds (0x%08x)\n",
505 src_offset, cmd, info->name, info->len,
506 len);
507 return -EINVAL;
508 }
509
510 if (cmd != VC4_PACKET_GEM_HANDLES)
511 memcpy(dst_pkt, src_pkt, info->len);
512
513 if (info->func && info->func(exec,
514 dst_pkt + 1,
515 src_pkt + 1)) {
516 DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
517 src_offset, cmd, info->name);
518 return -EINVAL;
519 }
520
521 src_offset += info->len;
522 /* GEM handle loading doesn't produce HW packets. */
523 if (cmd != VC4_PACKET_GEM_HANDLES)
524 dst_offset += info->len;
525
526 /* When the CL hits halt, it'll stop reading anything else. */
527 if (cmd == VC4_PACKET_HALT)
528 break;
529 }
530
531 exec->ct0ea = exec->ct0ca + dst_offset;
532
533 if (!exec->found_start_tile_binning_packet) {
534 DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
535 return -EINVAL;
536 }
537
538 /* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The
539 * semaphore is used to trigger the render CL to start up, and the
540 * FLUSH is what caps the bin lists with
541 * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
542 * render CL when they get called to) and actually triggers the queued
543 * semaphore increment.
544 */
545 if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
546 DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
547 "VC4_PACKET_FLUSH\n");
548 return -EINVAL;
549 }
550
551 return 0;
552}
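/* For reference, a minimal bin CL that satisfies this validator looks
 * like (illustrative packet sequence, not from the original source):
 *
 *     VC4_PACKET_TILE_BINNING_MODE_CONFIG   <- allocates the tile BO
 *     VC4_PACKET_START_TILE_BINNING
 *     VC4_PACKET_GEM_HANDLES                <- consumed, never copied to HW
 *     VC4_PACKET_GL_SHADER_STATE
 *     VC4_PACKET_GL_ARRAY_PRIMITIVE
 *     VC4_PACKET_INCREMENT_SEMAPHORE        <- must sit at bin_cl_size - 2
 *     VC4_PACKET_FLUSH                      <- must be the final byte
 */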
553
554static bool
555reloc_tex(struct vc4_exec_info *exec,
556 void *uniform_data_u,
557 struct vc4_texture_sample_info *sample,
558 uint32_t texture_handle_index)
559
560{
561 struct drm_gem_cma_object *tex;
562 uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
563 uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
564 uint32_t p2 = (sample->p_offset[2] != ~0 ?
565 *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
566 uint32_t p3 = (sample->p_offset[3] != ~0 ?
567 *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
568 uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
569 uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
570 uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
571 uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
572 uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
573 uint32_t cpp, tiling_format, utile_w, utile_h;
574 uint32_t i;
575 uint32_t cube_map_stride = 0;
576 enum vc4_texture_data_type type;
577
578 tex = vc4_use_bo(exec, texture_handle_index);
579 if (!tex)
580 return false;
581
582 if (sample->is_direct) {
583 uint32_t remaining_size = tex->base.size - p0;
584
585 if (p0 > tex->base.size - 4) {
586 DRM_ERROR("UBO offset greater than UBO size\n");
587 goto fail;
588 }
589 if (p1 > remaining_size - 4) {
590 DRM_ERROR("UBO clamp would allow reads "
591 "outside of UBO\n");
592 goto fail;
593 }
594 *validated_p0 = tex->paddr + p0;
595 return true;
596 }
597
598 if (width == 0)
599 width = 2048;
600 if (height == 0)
601 height = 2048;
602
603 if (p0 & VC4_TEX_P0_CMMODE_MASK) {
604 if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
605 VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
606 cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
607 if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
608 VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
609 if (cube_map_stride) {
610 DRM_ERROR("Cube map stride set twice\n");
611 goto fail;
612 }
613
614 cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
615 }
616 if (!cube_map_stride) {
617 DRM_ERROR("Cube map stride not set\n");
618 goto fail;
619 }
620 }
621
622 type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
623 (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
624
625 switch (type) {
626 case VC4_TEXTURE_TYPE_RGBA8888:
627 case VC4_TEXTURE_TYPE_RGBX8888:
628 case VC4_TEXTURE_TYPE_RGBA32R:
629 cpp = 4;
630 break;
631 case VC4_TEXTURE_TYPE_RGBA4444:
632 case VC4_TEXTURE_TYPE_RGBA5551:
633 case VC4_TEXTURE_TYPE_RGB565:
634 case VC4_TEXTURE_TYPE_LUMALPHA:
635 case VC4_TEXTURE_TYPE_S16F:
636 case VC4_TEXTURE_TYPE_S16:
637 cpp = 2;
638 break;
639 case VC4_TEXTURE_TYPE_LUMINANCE:
640 case VC4_TEXTURE_TYPE_ALPHA:
641 case VC4_TEXTURE_TYPE_S8:
642 cpp = 1;
643 break;
644 case VC4_TEXTURE_TYPE_ETC1:
645 case VC4_TEXTURE_TYPE_BW1:
646 case VC4_TEXTURE_TYPE_A4:
647 case VC4_TEXTURE_TYPE_A1:
648 case VC4_TEXTURE_TYPE_RGBA64:
649 case VC4_TEXTURE_TYPE_YUV422R:
650 default:
651 DRM_ERROR("Texture format %d unsupported\n", type);
652 goto fail;
653 }
654 utile_w = utile_width(cpp);
655 utile_h = utile_height(cpp);
656
657 if (type == VC4_TEXTURE_TYPE_RGBA32R) {
658 tiling_format = VC4_TILING_FORMAT_LINEAR;
659 } else {
660 if (size_is_lt(width, height, cpp))
661 tiling_format = VC4_TILING_FORMAT_LT;
662 else
663 tiling_format = VC4_TILING_FORMAT_T;
664 }
665
666 if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
667 tiling_format, width, height, cpp)) {
668 goto fail;
669 }
670
671 /* The mipmap levels are stored before the base of the texture. Make
672 * sure there is actually space in the BO.
673 */
674 for (i = 1; i <= miplevels; i++) {
675 uint32_t level_width = max(width >> i, 1u);
676 uint32_t level_height = max(height >> i, 1u);
677 uint32_t aligned_width, aligned_height;
678 uint32_t level_size;
679
680 /* Once the levels get small enough, they drop from T to LT. */
681 if (tiling_format == VC4_TILING_FORMAT_T &&
682 size_is_lt(level_width, level_height, cpp)) {
683 tiling_format = VC4_TILING_FORMAT_LT;
684 }
685
686 switch (tiling_format) {
687 case VC4_TILING_FORMAT_T:
688 aligned_width = round_up(level_width, utile_w * 8);
689 aligned_height = round_up(level_height, utile_h * 8);
690 break;
691 case VC4_TILING_FORMAT_LT:
692 aligned_width = round_up(level_width, utile_w);
693 aligned_height = round_up(level_height, utile_h);
694 break;
695 default:
696 aligned_width = round_up(level_width, utile_w);
697 aligned_height = level_height;
698 break;
699 }
700
701 level_size = aligned_width * cpp * aligned_height;
702
703 if (offset < level_size) {
704 DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
705 "overflowed buffer bounds (offset %d)\n",
706 i, level_width, level_height,
707 aligned_width, aligned_height,
708 level_size, offset);
709 goto fail;
710 }
711
712 offset -= level_size;
713 }
714
715 *validated_p0 = tex->paddr + p0;
716
717 return true;
718 fail:
719 DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
720 DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
721 DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
722 DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
723 return false;
724}
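/* Worked example for the miplevel walk above (illustrative): a 256x256
 * cpp=4 texture with miplevels = 2 stays in T format at every level (no
 * axis drops to 16 or below), so level 1 costs 128 * 4 * 128 = 65,536
 * bytes and level 2 costs 64 * 4 * 64 = 16,384 bytes.  Since levels are
 * stored below the base, the base offset in p0 must be at least 81,920
 * bytes into the BO to validate.
 */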
725
726static int
727validate_gl_shader_rec(struct drm_device *dev,
728 struct vc4_exec_info *exec,
729 struct vc4_shader_state *state)
730{
731 uint32_t *src_handles;
732 void *pkt_u, *pkt_v;
733 static const uint32_t shader_reloc_offsets[] = {
734 4, /* fs */
735 16, /* vs */
736 28, /* cs */
737 };
738 uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
739 struct drm_gem_cma_object *bo[shader_reloc_count + 8];
740 uint32_t nr_attributes, nr_relocs, packet_size;
741 int i;
742
743 nr_attributes = state->addr & 0x7;
744 if (nr_attributes == 0)
745 nr_attributes = 8;
746 packet_size = gl_shader_rec_size(state->addr);
747
748 nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
749 if (nr_relocs * 4 > exec->shader_rec_size) {
750 DRM_ERROR("overflowed shader recs reading %d handles "
751 "from %d bytes left\n",
752 nr_relocs, exec->shader_rec_size);
753 return -EINVAL;
754 }
755 src_handles = exec->shader_rec_u;
756 exec->shader_rec_u += nr_relocs * 4;
757 exec->shader_rec_size -= nr_relocs * 4;
758
759 if (packet_size > exec->shader_rec_size) {
760 DRM_ERROR("overflowed shader recs copying %db packet "
761 "from %d bytes left\n",
762 packet_size, exec->shader_rec_size);
763 return -EINVAL;
764 }
765 pkt_u = exec->shader_rec_u;
766 pkt_v = exec->shader_rec_v;
767 memcpy(pkt_v, pkt_u, packet_size);
768 exec->shader_rec_u += packet_size;
769 /* Shader recs have to be aligned to 16 bytes (due to the attribute
770 * flags being in the low bits), so round the next validated shader
771 * rec address up. This should be safe, since we've got so many
772 * relocations in a shader rec packet.
773 */
774 BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
775 exec->shader_rec_v += roundup(packet_size, 16);
776 exec->shader_rec_size -= packet_size;
777
778 if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
779 DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
780 return -EINVAL;
781 }
782
783 for (i = 0; i < shader_reloc_count; i++) {
784 if (src_handles[i] >= exec->bo_count) {
785 DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
786 return -EINVAL;
787 }
788
789 bo[i] = exec->bo[src_handles[i]];
790 if (!bo[i])
791 return -EINVAL;
792 }
793 for (i = shader_reloc_count; i < nr_relocs; i++) {
794 bo[i] = vc4_use_bo(exec, src_handles[i]);
795 if (!bo[i])
796 return -EINVAL;
797 }
798
799 for (i = 0; i < shader_reloc_count; i++) {
800 struct vc4_validated_shader_info *validated_shader;
801 uint32_t o = shader_reloc_offsets[i];
802 uint32_t src_offset = *(uint32_t *)(pkt_u + o);
803 uint32_t *texture_handles_u;
804 void *uniform_data_u;
805 uint32_t tex;
806
807 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
808
809 if (src_offset != 0) {
810 DRM_ERROR("Shaders must be at offset 0 of "
811 "the BO.\n");
812 return -EINVAL;
813 }
814
815 validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
816 if (!validated_shader)
817 return -EINVAL;
818
819 if (validated_shader->uniforms_src_size >
820 exec->uniforms_size) {
821 DRM_ERROR("Uniforms src buffer overflow\n");
822 return -EINVAL;
823 }
824
825 texture_handles_u = exec->uniforms_u;
826 uniform_data_u = (texture_handles_u +
827 validated_shader->num_texture_samples);
828
829 memcpy(exec->uniforms_v, uniform_data_u,
830 validated_shader->uniforms_size);
831
832 for (tex = 0;
833 tex < validated_shader->num_texture_samples;
834 tex++) {
835 if (!reloc_tex(exec,
836 uniform_data_u,
837 &validated_shader->texture_samples[tex],
838 texture_handles_u[tex])) {
839 return -EINVAL;
840 }
841 }
842
843 *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
844
845 exec->uniforms_u += validated_shader->uniforms_src_size;
846 exec->uniforms_v += validated_shader->uniforms_size;
847 exec->uniforms_p += validated_shader->uniforms_size;
848 }
849
850 for (i = 0; i < nr_attributes; i++) {
851 struct drm_gem_cma_object *vbo =
852 bo[ARRAY_SIZE(shader_reloc_offsets) + i];
853 uint32_t o = 36 + i * 8;
854 uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
855 uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
856 uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
857 uint32_t max_index;
858
859 if (state->addr & 0x8)
860 stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
861
862 if (vbo->base.size < offset ||
863 vbo->base.size - offset < attr_size) {
864 DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
865 offset, attr_size, vbo->base.size);
866 return -EINVAL;
867 }
868
869 if (stride != 0) {
870 max_index = ((vbo->base.size - offset - attr_size) /
871 stride);
872 if (state->max_index > max_index) {
873 DRM_ERROR("primitives use index %d out of "
874 "supplied %d\n",
875 state->max_index, max_index);
876 return -EINVAL;
877 }
878 }
879
880 *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
881 }
882
883 return 0;
884}
885
886int
887vc4_validate_shader_recs(struct drm_device *dev,
888 struct vc4_exec_info *exec)
889{
890 uint32_t i;
891 int ret = 0;
892
893 for (i = 0; i < exec->shader_state_count; i++) {
894 ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
895 if (ret)
896 return ret;
897 }
898
899 return ret;
900}
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
new file mode 100644
index 000000000000..f67124b4c534
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -0,0 +1,513 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Shader validator for VC4.
26 *
27 * The VC4 has no IOMMU between it and system memory, so a user with
28 * access to execute shaders could escalate privilege by overwriting
29 * system memory (using the VPM write address register in the
30 * general-purpose DMA mode) or reading system memory it shouldn't
31 * (reading it as a texture, or uniform data, or vertex data).
32 *
33 * This walks over a shader BO, ensuring that its accesses are
34 * appropriately bounded, and recording how many texture accesses are
35 * made and where so that we can do relocations for them in the
36 * uniform stream.
37 */
38
39#include "vc4_drv.h"
40#include "vc4_qpu_defines.h"
41
42struct vc4_shader_validation_state {
43 struct vc4_texture_sample_info tmu_setup[2];
44 int tmu_write_count[2];
45
46 /* For registers that were last written to by a MIN instruction with
47 * one argument being a uniform, the address of the uniform.
48 * Otherwise, ~0.
49 *
50 * This is used for the validation of direct address memory reads.
51 */
52 uint32_t live_min_clamp_offsets[32 + 32 + 4];
53 bool live_max_clamp_regs[32 + 32 + 4];
54};
55
56static uint32_t
57waddr_to_live_reg_index(uint32_t waddr, bool is_b)
58{
59 if (waddr < 32) {
60 if (is_b)
61 return 32 + waddr;
62 else
63 return waddr;
64 } else if (waddr <= QPU_W_ACC3) {
65 return 64 + waddr - QPU_W_ACC0;
66 } else {
67 return ~0;
68 }
69}
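/* The live-register index space used by this helper (and the raddr one
 * below) is flat: 0-31 are regfile A, 32-63 are regfile B, and 64-67 are
 * the accumulators r0-r3, matching the 32 + 32 + 4 entries of the tracking
 * arrays in struct vc4_shader_validation_state; ~0 means "not tracked".
 */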
70
71static uint32_t
72raddr_add_a_to_live_reg_index(uint64_t inst)
73{
74 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
75 uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
76 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
77 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
78
79 if (add_a == QPU_MUX_A)
80 return raddr_a;
81 else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
82 return 32 + raddr_b;
83 else if (add_a <= QPU_MUX_R3)
84 return 64 + add_a;
85 else
86 return ~0;
87}
88
89static bool
90is_tmu_submit(uint32_t waddr)
91{
92 return (waddr == QPU_W_TMU0_S ||
93 waddr == QPU_W_TMU1_S);
94}
95
96static bool
97is_tmu_write(uint32_t waddr)
98{
99 return (waddr >= QPU_W_TMU0_S &&
100 waddr <= QPU_W_TMU1_B);
101}
102
103static bool
104record_texture_sample(struct vc4_validated_shader_info *validated_shader,
105 struct vc4_shader_validation_state *validation_state,
106 int tmu)
107{
108 uint32_t s = validated_shader->num_texture_samples;
109 int i;
110 struct vc4_texture_sample_info *temp_samples;
111
112 temp_samples = krealloc(validated_shader->texture_samples,
113 (s + 1) * sizeof(*temp_samples),
114 GFP_KERNEL);
115 if (!temp_samples)
116 return false;
117
118 memcpy(&temp_samples[s],
119 &validation_state->tmu_setup[tmu],
120 sizeof(*temp_samples));
121
122 validated_shader->num_texture_samples = s + 1;
123 validated_shader->texture_samples = temp_samples;
124
125 for (i = 0; i < 4; i++)
126 validation_state->tmu_setup[tmu].p_offset[i] = ~0;
127
128 return true;
129}
130
131static bool
132check_tmu_write(uint64_t inst,
133 struct vc4_validated_shader_info *validated_shader,
134 struct vc4_shader_validation_state *validation_state,
135 bool is_mul)
136{
137 uint32_t waddr = (is_mul ?
138 QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
139 QPU_GET_FIELD(inst, QPU_WADDR_ADD));
140 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
141 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
142 int tmu = waddr > QPU_W_TMU0_B;
143 bool submit = is_tmu_submit(waddr);
144 bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
145 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
146
147 if (is_direct) {
148 uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
149 uint32_t clamp_reg, clamp_offset;
150
151 if (sig == QPU_SIG_SMALL_IMM) {
152 DRM_ERROR("direct TMU read used small immediate\n");
153 return false;
154 }
155
156 /* Make sure that this texture load is an add of the base
157 * address of the UBO to a clamped offset within the UBO.
158 */
159 if (is_mul ||
160 QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
161 DRM_ERROR("direct TMU load wasn't an add\n");
162 return false;
163 }
164
165 /* We assert that the clamped address is the first
166 * argument, and the UBO base address is the second argument.
167 * This is arbitrary, but simpler than supporting flipping the
168 * two either way.
169 */
170 clamp_reg = raddr_add_a_to_live_reg_index(inst);
171 if (clamp_reg == ~0) {
172 DRM_ERROR("direct TMU load wasn't clamped\n");
173 return false;
174 }
175
176 clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
177 if (clamp_offset == ~0) {
178 DRM_ERROR("direct TMU load wasn't clamped\n");
179 return false;
180 }
181
182 /* Store the clamp value's offset in p1 (see reloc_tex() in
183 * vc4_validate.c).
184 */
185 validation_state->tmu_setup[tmu].p_offset[1] =
186 clamp_offset;
187
188 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
189 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
190 DRM_ERROR("direct TMU load didn't add to a uniform\n");
191 return false;
192 }
193
194 validation_state->tmu_setup[tmu].is_direct = true;
195 } else {
196 if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
197 raddr_b == QPU_R_UNIF)) {
198 DRM_ERROR("uniform read in the same instruction as "
199 "texture setup.\n");
200 return false;
201 }
202 }
203
204 if (validation_state->tmu_write_count[tmu] >= 4) {
205 DRM_ERROR("TMU%d got too many parameters before dispatch\n",
206 tmu);
207 return false;
208 }
209 validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
210 validated_shader->uniforms_size;
211 validation_state->tmu_write_count[tmu]++;
212 /* Since direct uses a RADDR uniform reference, it will get counted in
213 * check_instruction_reads()
214 */
215 if (!is_direct)
216 validated_shader->uniforms_size += 4;
217
218 if (submit) {
219 if (!record_texture_sample(validated_shader,
220 validation_state, tmu)) {
221 return false;
222 }
223
224 validation_state->tmu_write_count[tmu] = 0;
225 }
226
227 return true;
228}
229
230static bool
231check_reg_write(uint64_t inst,
232 struct vc4_validated_shader_info *validated_shader,
233 struct vc4_shader_validation_state *validation_state,
234 bool is_mul)
235{
236 uint32_t waddr = (is_mul ?
237 QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
238 QPU_GET_FIELD(inst, QPU_WADDR_ADD));
239
240 switch (waddr) {
241 case QPU_W_UNIFORMS_ADDRESS:
242 /* XXX: We'll probably need to support this for reladdr, but
243 * it's definitely a security-related one.
244 */
245 DRM_ERROR("uniforms address load unsupported\n");
246 return false;
247
248 case QPU_W_TLB_COLOR_MS:
249 case QPU_W_TLB_COLOR_ALL:
250 case QPU_W_TLB_Z:
251 /* These only interact with the tile buffer, not main memory,
252 * so they're safe.
253 */
254 return true;
255
256 case QPU_W_TMU0_S:
257 case QPU_W_TMU0_T:
258 case QPU_W_TMU0_R:
259 case QPU_W_TMU0_B:
260 case QPU_W_TMU1_S:
261 case QPU_W_TMU1_T:
262 case QPU_W_TMU1_R:
263 case QPU_W_TMU1_B:
264 return check_tmu_write(inst, validated_shader, validation_state,
265 is_mul);
266
267 case QPU_W_HOST_INT:
268 case QPU_W_TMU_NOSWAP:
269 case QPU_W_TLB_ALPHA_MASK:
270 case QPU_W_MUTEX_RELEASE:
271 /* XXX: I haven't thought about these, so don't support them
272 * for now.
273 */
274 DRM_ERROR("Unsupported waddr %d\n", waddr);
275 return false;
276
277 case QPU_W_VPM_ADDR:
278 DRM_ERROR("General VPM DMA unsupported\n");
279 return false;
280
281 case QPU_W_VPM:
282 case QPU_W_VPMVCD_SETUP:
283 /* We allow VPM setup in general, even including VPM DMA
284 * configuration setup, because the (unsafe) DMA can only be
285 * triggered by QPU_W_VPM_ADDR writes.
286 */
287 return true;
288
289 case QPU_W_TLB_STENCIL_SETUP:
290 return true;
291 }
292
293 return true;
294}
295
296static void
297track_live_clamps(uint64_t inst,
298 struct vc4_validated_shader_info *validated_shader,
299 struct vc4_shader_validation_state *validation_state)
300{
301 uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
302 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
303 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
304 uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
305 uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
306 uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
307 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
308 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
309 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
310 bool ws = inst & QPU_WS;
311 uint32_t lri_add_a, lri_add, lri_mul;
312 bool add_a_is_min_0;
313
314 /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
315 * before we clear previous live state.
316 */
317 lri_add_a = raddr_add_a_to_live_reg_index(inst);
318 add_a_is_min_0 = (lri_add_a != ~0 &&
319 validation_state->live_max_clamp_regs[lri_add_a]);
320
321 /* Clear live state for registers written by our instruction. */
322 lri_add = waddr_to_live_reg_index(waddr_add, ws);
323 lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
324 if (lri_mul != ~0) {
325 validation_state->live_max_clamp_regs[lri_mul] = false;
326 validation_state->live_min_clamp_offsets[lri_mul] = ~0;
327 }
328 if (lri_add != ~0) {
329 validation_state->live_max_clamp_regs[lri_add] = false;
330 validation_state->live_min_clamp_offsets[lri_add] = ~0;
331 } else {
332 /* Nothing further to do for live tracking, since only ADDs
333 * generate new live clamp registers.
334 */
335 return;
336 }
337
338 /* Now, handle remaining live clamp tracking for the ADD operation. */
339
340 if (cond_add != QPU_COND_ALWAYS)
341 return;
342
343 if (op_add == QPU_A_MAX) {
344 /* Track live clamps of a value to a minimum of 0 (in either
345 * arg).
346 */
347 if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
348 (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
349 return;
350 }
351
352 validation_state->live_max_clamp_regs[lri_add] = true;
353 } else if (op_add == QPU_A_MIN) {
354 /* Track live clamps of a value clamped to a minimum of 0 and
355 * a maximum of some uniform's offset.
356 */
357 if (!add_a_is_min_0)
358 return;
359
360 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
361 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
362 sig != QPU_SIG_SMALL_IMM)) {
363 return;
364 }
365
366 validation_state->live_min_clamp_offsets[lri_add] =
367 validated_shader->uniforms_size;
368 }
369}
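/* Putting the above together, the instruction pattern this tracker
 * accepts for a direct TMU read looks roughly like (hypothetical QPU
 * assembly, for illustration only):
 *
 *     max  ra1, <offset>, 0    ; small immediate 0 -> live max-clamp on ra1
 *     min  ra2, ra1, unif      ; uniform upper bound -> live min-clamp,
 *                              ;   recording that uniform's offset
 *     add  tmu0_s, ra2, unif   ; clamped offset + UBO base address,
 *                              ;   accepted by check_tmu_write() above
 */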
370
371static bool
372check_instruction_writes(uint64_t inst,
373 struct vc4_validated_shader_info *validated_shader,
374 struct vc4_shader_validation_state *validation_state)
375{
376 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
377 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
378 bool ok;
379
380 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
381 DRM_ERROR("ADD and MUL both set up textures\n");
382 return false;
383 }
384
385 ok = (check_reg_write(inst, validated_shader, validation_state,
386 false) &&
387 check_reg_write(inst, validated_shader, validation_state,
388 true));
389
390 track_live_clamps(inst, validated_shader, validation_state);
391
392 return ok;
393}
394
395static bool
396check_instruction_reads(uint64_t inst,
397 struct vc4_validated_shader_info *validated_shader)
398{
399 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
400 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
401 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
402
403 if (raddr_a == QPU_R_UNIF ||
404 (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
405 /* This can't overflow the uint32_t, because each 4-byte
406 * increment here costs 8 bytes of instruction, so the BO
407 * would be exhausted long before the counter wraps.
408 */
409 validated_shader->uniforms_size += 4;
410 }
411
412 return true;
413}
414
415struct vc4_validated_shader_info *
416vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
417{
418 bool found_shader_end = false;
419 int shader_end_ip = 0;
420 uint32_t ip, max_ip;
421 uint64_t *shader;
422 struct vc4_validated_shader_info *validated_shader;
423 struct vc4_shader_validation_state validation_state;
424 int i;
425
426 memset(&validation_state, 0, sizeof(validation_state));
427
428 for (i = 0; i < 8; i++)
429 validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
430 for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
431 validation_state.live_min_clamp_offsets[i] = ~0;
432
433 shader = shader_obj->vaddr;
434 max_ip = shader_obj->base.size / sizeof(uint64_t);
435
436 validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
437 if (!validated_shader)
438 return NULL;
439
440 for (ip = 0; ip < max_ip; ip++) {
441 uint64_t inst = shader[ip];
442 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
443
444 switch (sig) {
445 case QPU_SIG_NONE:
446 case QPU_SIG_WAIT_FOR_SCOREBOARD:
447 case QPU_SIG_SCOREBOARD_UNLOCK:
448 case QPU_SIG_COLOR_LOAD:
449 case QPU_SIG_LOAD_TMU0:
450 case QPU_SIG_LOAD_TMU1:
451 case QPU_SIG_PROG_END:
452 case QPU_SIG_SMALL_IMM:
453 if (!check_instruction_writes(inst, validated_shader,
454 &validation_state)) {
455 DRM_ERROR("Bad write at ip %d\n", ip);
456 goto fail;
457 }
458
459 if (!check_instruction_reads(inst, validated_shader))
460 goto fail;
461
462 if (sig == QPU_SIG_PROG_END) {
463 found_shader_end = true;
464 shader_end_ip = ip;
465 }
466
467 break;
468
469 case QPU_SIG_LOAD_IMM:
470 if (!check_instruction_writes(inst, validated_shader,
471 &validation_state)) {
472 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
473 goto fail;
474 }
475 break;
476
477 default:
478 DRM_ERROR("Unsupported QPU signal %d at "
479 "instruction %d\n", sig, ip);
480 goto fail;
481 }
482
483 /* There are two delay slots after program end is signaled
484 * that are still executed, then we're finished.
485 */
486 if (found_shader_end && ip == shader_end_ip + 2)
487 break;
488 }
489
490 if (ip == max_ip) {
491 DRM_ERROR("shader failed to terminate before "
492 "shader BO end at %zd\n",
493 shader_obj->base.size);
494 goto fail;
495 }
496
497 /* Again, no chance of integer overflow here because the worst case
498 * scenario is 8 bytes of uniforms plus handles per 8-byte
499 * instruction.
500 */
501 validated_shader->uniforms_src_size =
502 (validated_shader->uniforms_size +
503 4 * validated_shader->num_texture_samples);
504
505 return validated_shader;
506
507fail:
508 if (validated_shader) {
509 kfree(validated_shader->texture_samples);
510 kfree(validated_shader);
511 }
512 return NULL;
513}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f545913a56c7..a165f03eaa79 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -215,7 +215,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
215int 215int
216virtio_gpu_framebuffer_init(struct drm_device *dev, 216virtio_gpu_framebuffer_init(struct drm_device *dev,
217 struct virtio_gpu_framebuffer *vgfb, 217 struct virtio_gpu_framebuffer *vgfb,
218 struct drm_mode_fb_cmd2 *mode_cmd, 218 const struct drm_mode_fb_cmd2 *mode_cmd,
219 struct drm_gem_object *obj) 219 struct drm_gem_object *obj)
220{ 220{
221 int ret; 221 int ret;
@@ -374,16 +374,6 @@ static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
374 .best_encoder = virtio_gpu_best_encoder, 374 .best_encoder = virtio_gpu_best_encoder,
375}; 375};
376 376
377static void virtio_gpu_conn_save(struct drm_connector *connector)
378{
379 DRM_DEBUG("\n");
380}
381
382static void virtio_gpu_conn_restore(struct drm_connector *connector)
383{
384 DRM_DEBUG("\n");
385}
386
387static enum drm_connector_status virtio_gpu_conn_detect( 377static enum drm_connector_status virtio_gpu_conn_detect(
388 struct drm_connector *connector, 378 struct drm_connector *connector,
389 bool force) 379 bool force)
@@ -409,8 +399,6 @@ static void virtio_gpu_conn_destroy(struct drm_connector *connector)
409 399
410static const struct drm_connector_funcs virtio_gpu_connector_funcs = { 400static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
411 .dpms = drm_atomic_helper_connector_dpms, 401 .dpms = drm_atomic_helper_connector_dpms,
412 .save = virtio_gpu_conn_save,
413 .restore = virtio_gpu_conn_restore,
414 .detect = virtio_gpu_conn_detect, 402 .detect = virtio_gpu_conn_detect,
415 .fill_modes = drm_helper_probe_single_connector_modes, 403 .fill_modes = drm_helper_probe_single_connector_modes,
416 .destroy = virtio_gpu_conn_destroy, 404 .destroy = virtio_gpu_conn_destroy,
@@ -443,7 +431,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
443 if (IS_ERR(plane)) 431 if (IS_ERR(plane))
444 return PTR_ERR(plane); 432 return PTR_ERR(plane);
445 drm_crtc_init_with_planes(dev, crtc, plane, NULL, 433 drm_crtc_init_with_planes(dev, crtc, plane, NULL,
446 &virtio_gpu_crtc_funcs); 434 &virtio_gpu_crtc_funcs, NULL);
447 drm_mode_crtc_set_gamma_size(crtc, 256); 435 drm_mode_crtc_set_gamma_size(crtc, 256);
448 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); 436 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
449 plane->crtc = crtc; 437 plane->crtc = crtc;
@@ -453,7 +441,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
453 drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs); 441 drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
454 442
455 drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs, 443 drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
456 DRM_MODE_ENCODER_VIRTUAL); 444 DRM_MODE_ENCODER_VIRTUAL, NULL);
457 drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); 445 drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
458 encoder->possible_crtcs = 1 << index; 446 encoder->possible_crtcs = 1 << index;
459 447
@@ -465,7 +453,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
465static struct drm_framebuffer * 453static struct drm_framebuffer *
466virtio_gpu_user_framebuffer_create(struct drm_device *dev, 454virtio_gpu_user_framebuffer_create(struct drm_device *dev,
467 struct drm_file *file_priv, 455 struct drm_file *file_priv,
468 struct drm_mode_fb_cmd2 *mode_cmd) 456 const struct drm_mode_fb_cmd2 *mode_cmd)
469{ 457{
470 struct drm_gem_object *obj = NULL; 458 struct drm_gem_object *obj = NULL;
471 struct virtio_gpu_framebuffer *virtio_gpu_fb; 459 struct virtio_gpu_framebuffer *virtio_gpu_fb;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 79f0abe69b64..8f486f4c7023 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -328,7 +328,7 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
328/* virtio_gpu_display.c */ 328/* virtio_gpu_display.c */
329int virtio_gpu_framebuffer_init(struct drm_device *dev, 329int virtio_gpu_framebuffer_init(struct drm_device *dev,
330 struct virtio_gpu_framebuffer *vgfb, 330 struct virtio_gpu_framebuffer *vgfb,
331 struct drm_mode_fb_cmd2 *mode_cmd, 331 const struct drm_mode_fb_cmd2 *mode_cmd,
332 struct drm_gem_object *obj); 332 struct drm_gem_object *obj);
333int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); 333int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
334void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); 334void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 6a81e084593b..2242a80866a9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -32,7 +32,6 @@
32struct virtio_gpu_fbdev { 32struct virtio_gpu_fbdev {
33 struct drm_fb_helper helper; 33 struct drm_fb_helper helper;
34 struct virtio_gpu_framebuffer vgfb; 34 struct virtio_gpu_framebuffer vgfb;
35 struct list_head fbdev_list;
36 struct virtio_gpu_device *vgdev; 35 struct virtio_gpu_device *vgdev;
37 struct delayed_work work; 36 struct delayed_work work;
38}; 37};
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4a74129c5708..572fb351feab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -107,7 +107,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
107 &virtio_gpu_plane_funcs, 107 &virtio_gpu_plane_funcs,
108 virtio_gpu_formats, 108 virtio_gpu_formats,
109 ARRAY_SIZE(virtio_gpu_formats), 109 ARRAY_SIZE(virtio_gpu_formats),
110 DRM_PLANE_TYPE_PRIMARY); 110 DRM_PLANE_TYPE_PRIMARY, NULL);
111 if (ret) 111 if (ret)
112 goto err_plane_init; 112 goto err_plane_init;
113 113
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9fcd7f82995c..9394c3535e85 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -930,7 +930,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
930 930
931static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, 931static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
932 struct drm_file *file_priv, 932 struct drm_file *file_priv,
933 struct drm_mode_fb_cmd2 *mode_cmd2) 933 const struct drm_mode_fb_cmd2 *mode_cmd2)
934{ 934{
935 struct vmw_private *dev_priv = vmw_priv(dev); 935 struct vmw_private *dev_priv = vmw_priv(dev);
936 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 936 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1331,14 +1331,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1331 return 0; 1331 return 0;
1332} 1332}
1333 1333
1334void vmw_du_crtc_save(struct drm_crtc *crtc)
1335{
1336}
1337
1338void vmw_du_crtc_restore(struct drm_crtc *crtc)
1339{
1340}
1341
1342void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 1334void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1343 u16 *r, u16 *g, u16 *b, 1335 u16 *r, u16 *g, u16 *b,
1344 uint32_t start, uint32_t size) 1336 uint32_t start, uint32_t size)
@@ -1360,14 +1352,6 @@ int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
1360 return 0; 1352 return 0;
1361} 1353}
1362 1354
1363void vmw_du_connector_save(struct drm_connector *connector)
1364{
1365}
1366
1367void vmw_du_connector_restore(struct drm_connector *connector)
1368{
1369}
1370
1371enum drm_connector_status 1355enum drm_connector_status
1372vmw_du_connector_detect(struct drm_connector *connector, bool force) 1356vmw_du_connector_detect(struct drm_connector *connector, bool force)
1373{ 1357{
@@ -1554,7 +1538,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1554 drm_mode_probed_add(connector, mode); 1538 drm_mode_probed_add(connector, mode);
1555 } 1539 }
1556 1540
1557 drm_mode_connector_list_update(connector, true); 1541 drm_mode_connector_list_update(connector);
1558 /* Move the preferred mode first, help apps pick the right mode. */ 1542 /* Move the preferred mode first, help apps pick the right mode. */
1559 drm_mode_sort(&connector->modes); 1543 drm_mode_sort(&connector->modes);
1560 1544
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index bb63e4d795fa..2aff5e51d926 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -295,8 +295,6 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
295} 295}
296 296
297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { 297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
298 .save = vmw_du_crtc_save,
299 .restore = vmw_du_crtc_restore,
300 .cursor_set = vmw_du_crtc_cursor_set, 298 .cursor_set = vmw_du_crtc_cursor_set,
301 .cursor_move = vmw_du_crtc_cursor_move, 299 .cursor_move = vmw_du_crtc_cursor_move,
302 .gamma_set = vmw_du_crtc_gamma_set, 300 .gamma_set = vmw_du_crtc_gamma_set,
@@ -329,8 +327,6 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
329 327
330static struct drm_connector_funcs vmw_legacy_connector_funcs = { 328static struct drm_connector_funcs vmw_legacy_connector_funcs = {
331 .dpms = vmw_du_connector_dpms, 329 .dpms = vmw_du_connector_dpms,
332 .save = vmw_du_connector_save,
333 .restore = vmw_du_connector_restore,
334 .detect = vmw_du_connector_detect, 330 .detect = vmw_du_connector_detect,
335 .fill_modes = vmw_du_connector_fill_modes, 331 .fill_modes = vmw_du_connector_fill_modes,
336 .set_property = vmw_du_connector_set_property, 332 .set_property = vmw_du_connector_set_property,
@@ -367,7 +363,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
367 connector->status = vmw_du_connector_detect(connector, true); 363 connector->status = vmw_du_connector_detect(connector, true);
368 364
369 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 365 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
370 DRM_MODE_ENCODER_VIRTUAL); 366 DRM_MODE_ENCODER_VIRTUAL, NULL);
371 drm_mode_connector_attach_encoder(connector, encoder); 367 drm_mode_connector_attach_encoder(connector, encoder);
372 encoder->possible_crtcs = (1 << unit); 368 encoder->possible_crtcs = (1 << unit);
373 encoder->possible_clones = 0; 369 encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b96d1ab610c5..6bb7af37934a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -531,8 +531,6 @@ out_no_fence:
531} 531}
532 532
533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { 533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
534 .save = vmw_du_crtc_save,
535 .restore = vmw_du_crtc_restore,
536 .cursor_set = vmw_du_crtc_cursor_set, 534 .cursor_set = vmw_du_crtc_cursor_set,
537 .cursor_move = vmw_du_crtc_cursor_move, 535 .cursor_move = vmw_du_crtc_cursor_move,
538 .gamma_set = vmw_du_crtc_gamma_set, 536 .gamma_set = vmw_du_crtc_gamma_set,
@@ -565,10 +563,6 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
565 563
566static struct drm_connector_funcs vmw_sou_connector_funcs = { 564static struct drm_connector_funcs vmw_sou_connector_funcs = {
567 .dpms = vmw_du_connector_dpms, 565 .dpms = vmw_du_connector_dpms,
568 .save = vmw_du_connector_save,
569 .restore = vmw_du_connector_restore,
570 .detect = vmw_du_connector_detect,
571 .fill_modes = vmw_du_connector_fill_modes,
572 .set_property = vmw_du_connector_set_property, 566 .set_property = vmw_du_connector_set_property,
573 .destroy = vmw_sou_connector_destroy, 567 .destroy = vmw_sou_connector_destroy,
574}; 568};
@@ -603,7 +597,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
603 connector->status = vmw_du_connector_detect(connector, true); 597 connector->status = vmw_du_connector_detect(connector, true);
604 598
605 drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, 599 drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
606 DRM_MODE_ENCODER_VIRTUAL); 600 DRM_MODE_ENCODER_VIRTUAL, NULL);
607 drm_mode_connector_attach_encoder(connector, encoder); 601 drm_mode_connector_attach_encoder(connector, encoder);
608 encoder->possible_crtcs = (1 << unit); 602 encoder->possible_crtcs = (1 << unit);
609 encoder->possible_clones = 0; 603 encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b1fc1c02792d..45e72c2f15cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1041,8 +1041,6 @@ out_finish:
1041 * Screen Target CRTC dispatch table 1041 * Screen Target CRTC dispatch table
1042 */ 1042 */
1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = { 1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
1044 .save = vmw_du_crtc_save,
1045 .restore = vmw_du_crtc_restore,
1046 .cursor_set = vmw_du_crtc_cursor_set, 1044 .cursor_set = vmw_du_crtc_cursor_set,
1047 .cursor_move = vmw_du_crtc_cursor_move, 1045 .cursor_move = vmw_du_crtc_cursor_move,
1048 .gamma_set = vmw_du_crtc_gamma_set, 1046 .gamma_set = vmw_du_crtc_gamma_set,
@@ -1101,8 +1099,6 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
1101 1099
1102static struct drm_connector_funcs vmw_stdu_connector_funcs = { 1100static struct drm_connector_funcs vmw_stdu_connector_funcs = {
1103 .dpms = vmw_du_connector_dpms, 1101 .dpms = vmw_du_connector_dpms,
1104 .save = vmw_du_connector_save,
1105 .restore = vmw_du_connector_restore,
1106 .detect = vmw_du_connector_detect, 1102 .detect = vmw_du_connector_detect,
1107 .fill_modes = vmw_du_connector_fill_modes, 1103 .fill_modes = vmw_du_connector_fill_modes,
1108 .set_property = vmw_du_connector_set_property, 1104 .set_property = vmw_du_connector_set_property,
@@ -1149,7 +1145,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
1149 connector->status = vmw_du_connector_detect(connector, false); 1145 connector->status = vmw_du_connector_detect(connector, false);
1150 1146
1151 drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs, 1147 drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
1152 DRM_MODE_ENCODER_VIRTUAL); 1148 DRM_MODE_ENCODER_VIRTUAL, NULL);
1153 drm_mode_connector_attach_encoder(connector, encoder); 1149 drm_mode_connector_attach_encoder(connector, encoder);
1154 encoder->possible_crtcs = (1 << unit); 1150 encoder->possible_crtcs = (1 << unit);
1155 encoder->possible_clones = 0; 1151 encoder->possible_clones = 0;
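
Note on the two vmwgfx hunks above: besides dropping the legacy .save/.restore hooks, they add a trailing argument to drm_encoder_init(), which grew an optional printf-style name parameter around this time. A minimal sketch of the updated call (the default-name behaviour described in the comment is an assumption, not taken from this diff):

    /* drm_encoder_init() with the added name argument; passing NULL
     * asks the DRM core to generate a default name itself. A driver
     * could instead pass a printf-style name, e.g. "virtual-%d", unit.
     */
    drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
                     DRM_MODE_ENCODER_VIRTUAL, NULL);
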
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index ba47b30d28fa..f2e13eb8339f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -28,6 +28,7 @@
28#include <linux/irqchip/chained_irq.h> 28#include <linux/irqchip/chained_irq.h>
29#include <linux/irqdomain.h> 29#include <linux/irqdomain.h>
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/of_graph.h>
31 32
32#include <drm/drm_fourcc.h> 33#include <drm/drm_fourcc.h>
33 34
@@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev)
993struct ipu_platform_reg { 994struct ipu_platform_reg {
994 struct ipu_client_platformdata pdata; 995 struct ipu_client_platformdata pdata;
995 const char *name; 996 const char *name;
996 int reg_offset;
997}; 997};
998 998
999/* These must be in the order of the corresponding device tree port nodes */
999static const struct ipu_platform_reg client_reg[] = { 1000static const struct ipu_platform_reg client_reg[] = {
1000 { 1001 {
1001 .pdata = { 1002 .pdata = {
1003 .csi = 0,
1004 .dma[0] = IPUV3_CHANNEL_CSI0,
1005 .dma[1] = -EINVAL,
1006 },
1007 .name = "imx-ipuv3-camera",
1008 }, {
1009 .pdata = {
1010 .csi = 1,
1011 .dma[0] = IPUV3_CHANNEL_CSI1,
1012 .dma[1] = -EINVAL,
1013 },
1014 .name = "imx-ipuv3-camera",
1015 }, {
1016 .pdata = {
1002 .di = 0, 1017 .di = 0,
1003 .dc = 5, 1018 .dc = 5,
1004 .dp = IPU_DP_FLOW_SYNC_BG, 1019 .dp = IPU_DP_FLOW_SYNC_BG,
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = {
1015 .dma[1] = -EINVAL, 1030 .dma[1] = -EINVAL,
1016 }, 1031 },
1017 .name = "imx-ipuv3-crtc", 1032 .name = "imx-ipuv3-crtc",
1018 }, {
1019 .pdata = {
1020 .csi = 0,
1021 .dma[0] = IPUV3_CHANNEL_CSI0,
1022 .dma[1] = -EINVAL,
1023 },
1024 .reg_offset = IPU_CM_CSI0_REG_OFS,
1025 .name = "imx-ipuv3-camera",
1026 }, {
1027 .pdata = {
1028 .csi = 1,
1029 .dma[0] = IPUV3_CHANNEL_CSI1,
1030 .dma[1] = -EINVAL,
1031 },
1032 .reg_offset = IPU_CM_CSI1_REG_OFS,
1033 .name = "imx-ipuv3-camera",
1034 }, 1033 },
1035}; 1034};
1036 1035
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1051 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1052 const struct ipu_platform_reg *reg = &client_reg[i]; 1051 const struct ipu_platform_reg *reg = &client_reg[i];
1053 struct platform_device *pdev; 1052 struct platform_device *pdev;
1054 struct resource res; 1053
1055 1054 pdev = platform_device_alloc(reg->name, id++);
1056 if (reg->reg_offset) { 1055 if (!pdev) {
1057 memset(&res, 0, sizeof(res)); 1056 ret = -ENOMEM;
1058 res.flags = IORESOURCE_MEM; 1057 goto err_register;
1059 res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; 1058 }
1060 res.end = res.start + PAGE_SIZE - 1; 1059
1061 pdev = platform_device_register_resndata(dev, reg->name, 1060 pdev->dev.parent = dev;
1062 id++, &res, 1, &reg->pdata, sizeof(reg->pdata)); 1061
1063 } else { 1062 /* Associate subdevice with the corresponding port node */
1064 pdev = platform_device_register_data(dev, reg->name, 1063 pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
1065 id++, &reg->pdata, sizeof(reg->pdata)); 1064 if (!pdev->dev.of_node) {
1065 dev_err(dev, "missing port@%d node in %s\n", i,
1066 dev->of_node->full_name);
1067 ret = -ENODEV;
1068 goto err_register;
1066 } 1069 }
1067 1070
1068 if (IS_ERR(pdev)) { 1071 ret = platform_device_add_data(pdev, &reg->pdata,
1069 ret = PTR_ERR(pdev); 1072 sizeof(reg->pdata));
1073 if (!ret)
1074 ret = platform_device_add(pdev);
1075 if (ret) {
1076 platform_device_put(pdev);
1070 goto err_register; 1077 goto err_register;
1071 } 1078 }
1072 } 1079 }
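
The ipu-v3 rework above replaces the one-shot platform_device_register_resndata()/platform_device_register_data() calls with the split alloc/configure/add sequence, precisely so the child's of_node can be set before the device becomes visible. A minimal sketch of that pattern, with my_pdata and "my-client" as hypothetical placeholders:

    struct platform_device *pdev;
    int ret;

    pdev = platform_device_alloc("my-client", 0);
    if (!pdev)
            return -ENOMEM;

    pdev->dev.parent = dev;
    /* bind the child to its DT graph port before registration */
    pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, 0);

    ret = platform_device_add_data(pdev, &my_pdata, sizeof(my_pdata));
    if (!ret)
            ret = platform_device_add(pdev);
    if (ret)
            platform_device_put(pdev);  /* undoes the alloc on failure */
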
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ac1feea51be3..9024a3de4032 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -609,6 +609,7 @@
609#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 609#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
610#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 610#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
611#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 611#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
612#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d
612#define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a 613#define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a
613#define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a 614#define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a
614#define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a 615#define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c20ac76c0a8c..c690fae02cf8 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
665 struct lg_drv_data *drv_data; 665 struct lg_drv_data *drv_data;
666 int ret; 666 int ret;
667 667
 668 /* Only work with the 1st interface (G29 presents multiple) */ 668 /* G29 only works with the 1st interface */
669 if (iface_num != 0) { 669 if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
670 (iface_num != 0)) {
670 dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); 671 dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num);
671 return -ENODEV; 672 return -ENODEV;
672 } 673 }
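
For context on the hid-lg hunk: iface_num is conventionally derived from the USB interface descriptor of the parent device. A hedged sketch of the usual derivation (this mirrors common usbhid usage, not necessarily the exact surrounding lines of lg_probe()):

    struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
    __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
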
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 94bb137abe32..2324520b006d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -84,6 +84,7 @@ static const struct hid_blacklist {
84 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, 84 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
85 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 85 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
86 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, 86 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, 88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 89 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 90 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8b29949507d1..01a4f05c1642 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2481 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 2481 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
2482 if (features->touch_max) 2482 if (features->touch_max)
2483 features->device_type |= WACOM_DEVICETYPE_TOUCH; 2483 features->device_type |= WACOM_DEVICETYPE_TOUCH;
2484 if (features->type >= INTUOSHT || features->type <= BAMBOO_PT) 2484 if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
2485 features->device_type |= WACOM_DEVICETYPE_PAD; 2485 features->device_type |= WACOM_DEVICETYPE_PAD;
2486 2486
2487 features->x_max = 4096; 2487 features->x_max = 4096;
@@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F =
3213 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; 3213 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
3214static const struct wacom_features wacom_features_0x336 = 3214static const struct wacom_features wacom_features_0x336 =
3215 { "Wacom DTU1141", 23472, 13203, 1023, 0, 3215 { "Wacom DTU1141", 23472, 13203, 1023, 0,
3216 DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; 3216 DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
3217 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
3217static const struct wacom_features wacom_features_0x57 = 3218static const struct wacom_features wacom_features_0x57 =
3218 { "Wacom DTK2241", 95640, 54060, 2047, 63, 3219 { "Wacom DTK2241", 95640, 54060, 2047, 63,
3219 DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, 3220 DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
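
The one-character wacom fix above repairs a classic inverted range test: assuming INTUOSHT <= BAMBOO_PT in the type enum, any value below INTUOSHT already satisfies type <= BAMBOO_PT, so the '||' form was true for every type. A minimal illustration:

    /* always true when lo <= hi: any t < lo also satisfies t <= hi */
    static bool in_range_wrong(int t, int lo, int hi)
    {
            return t >= lo || t <= hi;
    }

    /* true only for t inside [lo, hi] */
    static bool in_range_fixed(int t, int lo, int hi)
    {
            return t >= lo && t <= hi;
    }
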
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 842b0043ad94..8f59f057cdf4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,7 @@ config SENSORS_APPLESMC
324config SENSORS_ARM_SCPI 324config SENSORS_ARM_SCPI
325 tristate "ARM SCPI Sensors" 325 tristate "ARM SCPI Sensors"
326 depends on ARM_SCPI_PROTOCOL 326 depends on ARM_SCPI_PROTOCOL
327 depends on THERMAL || !THERMAL_OF
327 help 328 help
328 This driver provides support for temperature, voltage, current 329 This driver provides support for temperature, voltage, current
329 and power sensors available on ARM Ltd's SCP based platforms. The 330 and power sensors available on ARM Ltd's SCP based platforms. The
@@ -1471,6 +1472,7 @@ config SENSORS_INA209
1471config SENSORS_INA2XX 1472config SENSORS_INA2XX
1472 tristate "Texas Instruments INA219 and compatibles" 1473 tristate "Texas Instruments INA219 and compatibles"
1473 depends on I2C 1474 depends on I2C
1475 select REGMAP_I2C
1474 help 1476 help
1475 If you say yes here you get support for INA219, INA220, INA226, 1477 If you say yes here you get support for INA219, INA220, INA226,
1476 INA230, and INA231 power monitor chips. 1478 INA230, and INA231 power monitor chips.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 1f5e956941b1..0af7fd311979 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
537static int applesmc_init_smcreg_try(void) 537static int applesmc_init_smcreg_try(void)
538{ 538{
539 struct applesmc_registers *s = &smcreg; 539 struct applesmc_registers *s = &smcreg;
540 bool left_light_sensor, right_light_sensor; 540 bool left_light_sensor = 0, right_light_sensor = 0;
541 unsigned int count; 541 unsigned int count;
542 u8 tmp[1]; 542 u8 tmp[1];
543 int ret; 543 int ret;
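
The applesmc change initializes the two flags because the helper that fills them can fail before writing its out-parameter, leaving the caller to read indeterminate stack memory. A minimal sketch of the hazard, with probe_key() and key_exists() as hypothetical stand-ins:

    static int probe_key(const char *key, bool *out)
    {
            if (!key_exists(key))
                    return -ENODEV;  /* *out never written on this path */
            *out = true;
            return 0;
    }

    bool left = false, right = false;  /* defined even if probing fails */

    probe_key("ALV0", &left);
    probe_key("ALV1", &right);
    num_sensors = left + right;        /* safe: both flags initialized */
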
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 2c1241bbf9af..7e20567bc369 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
117 struct scpi_ops *scpi_ops; 117 struct scpi_ops *scpi_ops;
118 struct device *hwdev, *dev = &pdev->dev; 118 struct device *hwdev, *dev = &pdev->dev;
119 struct scpi_sensors *scpi_sensors; 119 struct scpi_sensors *scpi_sensors;
120 int ret; 120 int ret, idx;
121 121
122 scpi_ops = get_scpi_ops(); 122 scpi_ops = get_scpi_ops();
123 if (!scpi_ops) 123 if (!scpi_ops)
@@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
146 146
147 scpi_sensors->scpi_ops = scpi_ops; 147 scpi_sensors->scpi_ops = scpi_ops;
148 148
149 for (i = 0; i < nr_sensors; i++) { 149 for (i = 0, idx = 0; i < nr_sensors; i++) {
150 struct sensor_data *sensor = &scpi_sensors->data[i]; 150 struct sensor_data *sensor = &scpi_sensors->data[idx];
151 151
152 ret = scpi_ops->sensor_get_info(i, &sensor->info); 152 ret = scpi_ops->sensor_get_info(i, &sensor->info);
153 if (ret) 153 if (ret)
@@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
183 num_power++; 183 num_power++;
184 break; 184 break;
185 default: 185 default:
186 break; 186 continue;
187 } 187 }
188 188
189 sensor->dev_attr_input.attr.mode = S_IRUGO; 189 sensor->dev_attr_input.attr.mode = S_IRUGO;
@@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
194 sensor->dev_attr_label.show = scpi_show_label; 194 sensor->dev_attr_label.show = scpi_show_label;
195 sensor->dev_attr_label.attr.name = sensor->label; 195 sensor->dev_attr_label.attr.name = sensor->label;
196 196
197 scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr; 197 scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr;
198 scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr; 198 scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr;
199 199
200 sysfs_attr_init(scpi_sensors->attrs[i << 1]); 200 sysfs_attr_init(scpi_sensors->attrs[idx << 1]);
201 sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]); 201 sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]);
202 idx++;
202 } 203 }
203 204
204 scpi_sensors->group.attrs = scpi_sensors->attrs; 205 scpi_sensors->group.attrs = scpi_sensors->attrs;
@@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
236 237
237 zone->sensor_id = i; 238 zone->sensor_id = i;
238 zone->scpi_sensors = scpi_sensors; 239 zone->scpi_sensors = scpi_sensors;
239 zone->tzd = thermal_zone_of_sensor_register(dev, i, zone, 240 zone->tzd = thermal_zone_of_sensor_register(dev,
240 &scpi_sensor_ops); 241 sensor->info.sensor_id, zone, &scpi_sensor_ops);
241 /* 242 /*
242 * The call to thermal_zone_of_sensor_register returns 243 * The call to thermal_zone_of_sensor_register returns
243 * an error for sensors that are not associated with 244 * an error for sensors that are not associated with
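
The scpi-hwmon loop above now walks every firmware sensor with i but advances idx only for sensor classes the driver exposes, keeping the attribute array densely packed. The skip-and-compact idiom, with read_info() and type_supported() as hypothetical helpers:

    int i, idx;

    for (i = 0, idx = 0; i < nr_sensors; i++) {
            struct sensor_data *sensor = &data[idx];

            if (read_info(i, &sensor->info))
                    return -EIO;
            if (!type_supported(sensor->info.class))
                    continue;           /* skipped entry: idx stays put */

            attrs[idx * 2]     = &sensor->dev_attr_input.attr;
            attrs[idx * 2 + 1] = &sensor->dev_attr_label.attr;
            idx++;
    }
    attrs[idx * 2] = NULL;              /* attribute lists are NULL-terminated */
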
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e24c2b680b47..7b0aa82ea38b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -126,6 +126,7 @@ config I2C_I801
126 Sunrise Point-LP (PCH) 126 Sunrise Point-LP (PCH)
127 DNV (SOC) 127 DNV (SOC)
128 Broxton (SOC) 128 Broxton (SOC)
129 Lewisburg (PCH)
129 130
130 This driver can also be built as a module. If so, the module 131 This driver can also be built as a module. If so, the module
131 will be called i2c-i801. 132 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c306751ceadb..f62d69799a9c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -62,6 +62,8 @@
62 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes 62 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
63 * DNV (SOC) 0x19df 32 hard yes yes yes 63 * DNV (SOC) 0x19df 32 hard yes yes yes
64 * Broxton (SOC) 0x5ad4 32 hard yes yes yes 64 * Broxton (SOC) 0x5ad4 32 hard yes yes yes
65 * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes
66 * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes
65 * 67 *
66 * Features supported by this driver: 68 * Features supported by this driver:
67 * Software PEC no 69 * Software PEC no
@@ -206,6 +208,8 @@
206#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 208#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23
207#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df 209#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
208#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 210#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
211#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3
212#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
209 213
210struct i801_mux_config { 214struct i801_mux_config {
211 char *gpio_chip; 215 char *gpio_chip;
@@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = {
869 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, 873 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
870 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, 874 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
871 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, 875 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
876 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
877 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
872 { 0, } 878 { 0, }
873}; 879};
874 880
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1e4d99da4164..9bb0b056b25f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -50,6 +50,7 @@
50#include <linux/of_device.h> 50#include <linux/of_device.h>
51#include <linux/of_dma.h> 51#include <linux/of_dma.h>
52#include <linux/of_gpio.h> 52#include <linux/of_gpio.h>
53#include <linux/pinctrl/consumer.h>
53#include <linux/platform_data/i2c-imx.h> 54#include <linux/platform_data/i2c-imx.h>
54#include <linux/platform_device.h> 55#include <linux/platform_device.h>
55#include <linux/sched.h> 56#include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index e23a7b068c60..0b20449e48cf 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
662 662
663static void xiic_start_xfer(struct xiic_i2c *i2c) 663static void xiic_start_xfer(struct xiic_i2c *i2c)
664{ 664{
665 665 spin_lock(&i2c->lock);
666 xiic_reinit(i2c);
666 __xiic_start_xfer(i2c); 667 __xiic_start_xfer(i2c);
668 spin_unlock(&i2c->lock);
667} 669}
668 670
669static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 671static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
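
The i2c-xiic hunk serializes transfer start against the interrupt path and reinitializes the controller first, so the handler can never observe a half-programmed core. The shape of the fix, assuming (as the plain spin_lock suggests) that the IRQ work runs in a threaded handler; a hard-IRQ handler would call for the _irqsave variants:

    static void start_xfer(struct my_i2c *i2c)      /* hypothetical type */
    {
            spin_lock(&i2c->lock);
            reinit_controller(i2c);  /* clear FIFOs, re-arm interrupts */
            program_first_msg(i2c);
            spin_unlock(&i2c->lock);
    }
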
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 040af5cc8143..ba8eb087f224 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev)
715 if (wakeirq > 0 && wakeirq != client->irq) 715 if (wakeirq > 0 && wakeirq != client->irq)
716 status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); 716 status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
717 else if (client->irq > 0) 717 else if (client->irq > 0)
718 status = dev_pm_set_wake_irq(dev, wakeirq); 718 status = dev_pm_set_wake_irq(dev, client->irq);
719 else 719 else
720 status = 0; 720 status = 0;
721 721
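
On the i2c-core fix: the else-if branch is reached exactly when no distinct dedicated wakeirq exists (wakeirq <= 0, or equal to the main IRQ), so the device's own interrupt must be armed; the old code passed the stale wakeirq value instead. Annotated, the corrected dispatch reads:

    if (wakeirq > 0 && wakeirq != client->irq)
            status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
    else if (client->irq > 0)
            status = dev_pm_set_wake_irq(dev, client->irq);  /* was: wakeirq */
    else
            status = 0;     /* no interrupt at all: nothing to arm */
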
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index eea0c79111e7..4d960d3b93c0 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -101,7 +101,7 @@
101#define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ 101#define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */
102 102
103/* ID Register Bit Designations (AD7793_REG_ID) */ 103/* ID Register Bit Designations (AD7793_REG_ID) */
104#define AD7785_ID 0xB 104#define AD7785_ID 0x3
105#define AD7792_ID 0xA 105#define AD7792_ID 0xA
106#define AD7793_ID 0xB 106#define AD7793_ID 0xB
107#define AD7794_ID 0xF 107#define AD7794_ID 0xF
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 599cde3d03a1..b10f629cc44b 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -106,6 +106,13 @@
106 106
107#define DEFAULT_SAMPLE_TIME 1000 107#define DEFAULT_SAMPLE_TIME 1000
108 108
109/* V at 25°C of 696 mV */
110#define VF610_VTEMP25_3V0 950
111/* V at 25°C of 699 mV */
112#define VF610_VTEMP25_3V3 867
113/* Typical sensor slope coefficient at all temperatures */
114#define VF610_TEMP_SLOPE_COEFF 1840
115
109enum clk_sel { 116enum clk_sel {
110 VF610_ADCIOC_BUSCLK_SET, 117 VF610_ADCIOC_BUSCLK_SET,
111 VF610_ADCIOC_ALTCLK_SET, 118 VF610_ADCIOC_ALTCLK_SET,
@@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
197 adc_feature->clk_div = 8; 204 adc_feature->clk_div = 8;
198 } 205 }
199 206
207 adck_rate = ipg_rate / adc_feature->clk_div;
208
200 /* 209 /*
201 * Determine the long sample time adder value to be used based 210 * Determine the long sample time adder value to be used based
202 * on the default minimum sample time provided. 211 * on the default minimum sample time provided.
@@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
221 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode 230 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
222 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles 231 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
223 */ 232 */
224 adck_rate = ipg_rate / info->adc_feature.clk_div;
225 for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) 233 for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
226 info->sample_freq_avail[i] = 234 info->sample_freq_avail[i] =
227 adck_rate / (6 + vf610_hw_avgs[i] * 235 adck_rate / (6 + vf610_hw_avgs[i] *
@@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
663 break; 671 break;
664 case IIO_TEMP: 672 case IIO_TEMP:
665 /* 673 /*
666 * Calculate in degree Celsius times 1000 674 * Calculate in degree Celsius times 1000
667 * Using sensor slope of 1.84 mV/°C and 675 * Using the typical sensor slope of 1.84 mV/°C
668 * V at 25°C of 696 mV 676 * and VREFH_ADC at 3.3V, V at 25°C of 699 mV
669 */ 677 */
670 *val = 25000 - ((int)info->value - 864) * 1000000 / 1840; 678 *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
679 1000000 / VF610_TEMP_SLOPE_COEFF;
680
671 break; 681 break;
672 default: 682 default:
673 mutex_unlock(&indio_dev->mlock); 683 mutex_unlock(&indio_dev->mlock);
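
With the new constants, the vf610 conversion is: milli-degrees Celsius = 25000 - (raw - VTEMP25) * 1000000 / SLOPE_COEFF, anchored at 25 °C for the calibration reading. A sketch for the 3.3 V reference case, using the values from the hunk above:

    /* returns milli-degrees Celsius for a raw ADC reading,
     * assuming VREFH_ADC = 3.3 V (VTEMP25 = 867, slope coeff 1840) */
    static int vf610_raw_to_mdegc(int raw)
    {
            return 25000 - (raw - 867) * 1000000 / 1840;
    }

    /* raw == 867 yields exactly 25000 (25.000 degrees C); each extra
     * count lowers the result by roughly 543 m°C (1000000 / 1840) */
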
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 0370624a35db..02e636a1c49a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
841 case XADC_REG_VCCINT: 841 case XADC_REG_VCCINT:
842 case XADC_REG_VCCAUX: 842 case XADC_REG_VCCAUX:
843 case XADC_REG_VREFP: 843 case XADC_REG_VREFP:
844 case XADC_REG_VREFN:
844 case XADC_REG_VCCBRAM: 845 case XADC_REG_VCCBRAM:
845 case XADC_REG_VCCPINT: 846 case XADC_REG_VCCPINT:
846 case XADC_REG_VCCPAUX: 847 case XADC_REG_VCCPAUX:
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 9e4d2c18b554..81ca0081a019 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -113,12 +113,16 @@ enum ad5064_type {
113 ID_AD5065, 113 ID_AD5065,
114 ID_AD5628_1, 114 ID_AD5628_1,
115 ID_AD5628_2, 115 ID_AD5628_2,
116 ID_AD5629_1,
117 ID_AD5629_2,
116 ID_AD5648_1, 118 ID_AD5648_1,
117 ID_AD5648_2, 119 ID_AD5648_2,
118 ID_AD5666_1, 120 ID_AD5666_1,
119 ID_AD5666_2, 121 ID_AD5666_2,
120 ID_AD5668_1, 122 ID_AD5668_1,
121 ID_AD5668_2, 123 ID_AD5668_2,
124 ID_AD5669_1,
125 ID_AD5669_2,
122}; 126};
123 127
124static int ad5064_write(struct ad5064_state *st, unsigned int cmd, 128static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
291 { }, 295 { },
292}; 296};
293 297
294#define AD5064_CHANNEL(chan, addr, bits) { \ 298#define AD5064_CHANNEL(chan, addr, bits, _shift) { \
295 .type = IIO_VOLTAGE, \ 299 .type = IIO_VOLTAGE, \
296 .indexed = 1, \ 300 .indexed = 1, \
297 .output = 1, \ 301 .output = 1, \
@@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
303 .sign = 'u', \ 307 .sign = 'u', \
304 .realbits = (bits), \ 308 .realbits = (bits), \
305 .storagebits = 16, \ 309 .storagebits = 16, \
306 .shift = 20 - bits, \ 310 .shift = (_shift), \
307 }, \ 311 }, \
308 .ext_info = ad5064_ext_info, \ 312 .ext_info = ad5064_ext_info, \
309} 313}
310 314
311#define DECLARE_AD5064_CHANNELS(name, bits) \ 315#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
312const struct iio_chan_spec name[] = { \ 316const struct iio_chan_spec name[] = { \
313 AD5064_CHANNEL(0, 0, bits), \ 317 AD5064_CHANNEL(0, 0, bits, shift), \
314 AD5064_CHANNEL(1, 1, bits), \ 318 AD5064_CHANNEL(1, 1, bits, shift), \
315 AD5064_CHANNEL(2, 2, bits), \ 319 AD5064_CHANNEL(2, 2, bits, shift), \
316 AD5064_CHANNEL(3, 3, bits), \ 320 AD5064_CHANNEL(3, 3, bits, shift), \
317 AD5064_CHANNEL(4, 4, bits), \ 321 AD5064_CHANNEL(4, 4, bits, shift), \
318 AD5064_CHANNEL(5, 5, bits), \ 322 AD5064_CHANNEL(5, 5, bits, shift), \
319 AD5064_CHANNEL(6, 6, bits), \ 323 AD5064_CHANNEL(6, 6, bits, shift), \
320 AD5064_CHANNEL(7, 7, bits), \ 324 AD5064_CHANNEL(7, 7, bits, shift), \
321} 325}
322 326
323#define DECLARE_AD5065_CHANNELS(name, bits) \ 327#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
324const struct iio_chan_spec name[] = { \ 328const struct iio_chan_spec name[] = { \
325 AD5064_CHANNEL(0, 0, bits), \ 329 AD5064_CHANNEL(0, 0, bits, shift), \
326 AD5064_CHANNEL(1, 3, bits), \ 330 AD5064_CHANNEL(1, 3, bits, shift), \
327} 331}
328 332
329static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); 333static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
330static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); 334static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
331static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); 335static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
332 336
333static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); 337static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
334static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); 338static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
335static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); 339static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
340
341static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
342static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
336 343
337static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { 344static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
338 [ID_AD5024] = { 345 [ID_AD5024] = {
@@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
382 .channels = ad5024_channels, 389 .channels = ad5024_channels,
383 .num_channels = 8, 390 .num_channels = 8,
384 }, 391 },
392 [ID_AD5629_1] = {
393 .shared_vref = true,
394 .internal_vref = 2500000,
395 .channels = ad5629_channels,
396 .num_channels = 8,
397 },
398 [ID_AD5629_2] = {
399 .shared_vref = true,
400 .internal_vref = 5000000,
401 .channels = ad5629_channels,
402 .num_channels = 8,
403 },
385 [ID_AD5648_1] = { 404 [ID_AD5648_1] = {
386 .shared_vref = true, 405 .shared_vref = true,
387 .internal_vref = 2500000, 406 .internal_vref = 2500000,
@@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
418 .channels = ad5064_channels, 437 .channels = ad5064_channels,
419 .num_channels = 8, 438 .num_channels = 8,
420 }, 439 },
440 [ID_AD5669_1] = {
441 .shared_vref = true,
442 .internal_vref = 2500000,
443 .channels = ad5669_channels,
444 .num_channels = 8,
445 },
446 [ID_AD5669_2] = {
447 .shared_vref = true,
448 .internal_vref = 5000000,
449 .channels = ad5669_channels,
450 .num_channels = 8,
451 },
421}; 452};
422 453
423static inline unsigned int ad5064_num_vref(struct ad5064_state *st) 454static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
@@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
597 unsigned int addr, unsigned int val) 628 unsigned int addr, unsigned int val)
598{ 629{
599 struct i2c_client *i2c = to_i2c_client(st->dev); 630 struct i2c_client *i2c = to_i2c_client(st->dev);
631 int ret;
600 632
601 st->data.i2c[0] = (cmd << 4) | addr; 633 st->data.i2c[0] = (cmd << 4) | addr;
602 put_unaligned_be16(val, &st->data.i2c[1]); 634 put_unaligned_be16(val, &st->data.i2c[1]);
603 return i2c_master_send(i2c, st->data.i2c, 3); 635
636 ret = i2c_master_send(i2c, st->data.i2c, 3);
637 if (ret < 0)
638 return ret;
639
640 return 0;
604} 641}
605 642
606static int ad5064_i2c_probe(struct i2c_client *i2c, 643static int ad5064_i2c_probe(struct i2c_client *i2c,
@@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
616} 653}
617 654
618static const struct i2c_device_id ad5064_i2c_ids[] = { 655static const struct i2c_device_id ad5064_i2c_ids[] = {
619 {"ad5629-1", ID_AD5628_1}, 656 {"ad5629-1", ID_AD5629_1},
620 {"ad5629-2", ID_AD5628_2}, 657 {"ad5629-2", ID_AD5629_2},
621 {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ 658 {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
622 {"ad5669-1", ID_AD5668_1}, 659 {"ad5669-1", ID_AD5669_1},
623 {"ad5669-2", ID_AD5668_2}, 660 {"ad5669-2", ID_AD5669_2},
624 {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ 661 {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
625 {} 662 {}
626}; 663};
627MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); 664MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
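
The ad5064 rework exists because the I2C parts left-align their DAC word differently from the SPI parts, so shift can no longer be derived as 20 - bits: the 12-bit AD5629 needs shift 4 and the 16-bit AD5669 shift 0. A sketch of how a sample is packed for the I2C frame (mirroring ad5064_i2c_write() above; where the pre-shift happens is an assumption):

    u8 buf[3];
    u16 word = (u16)(val << shift); /* e.g. shift = 4 for the AD5629 */

    buf[0] = (cmd << 4) | addr;     /* command nibble + channel address */
    put_unaligned_be16(word, &buf[1]);
    /* i2c_master_send(client, buf, 3) then transmits the 3-byte frame */
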
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 12128d1ca570..71991b5c0658 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
50 50
51 switch (mask) { 51 switch (mask) {
52 case IIO_CHAN_INFO_RAW: 52 case IIO_CHAN_INFO_RAW:
53 ret = i2c_smbus_read_word_data(*client, 53 ret = i2c_smbus_read_word_swapped(*client,
54 chan->type == IIO_TEMP ? 54 chan->type == IIO_TEMP ?
55 SI7020CMD_TEMP_HOLD : 55 SI7020CMD_TEMP_HOLD :
56 SI7020CMD_RH_HOLD); 56 SI7020CMD_RH_HOLD);
57 if (ret < 0) 57 if (ret < 0)
58 return ret; 58 return ret;
59 *val = ret >> 2; 59 *val = ret >> 2;
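
The si7020 fix is a pure byte-order repair: i2c_smbus_read_word_data() assembles the word low byte first per SMBus convention, while the sensor transmits MSB first, so the halves must be swapped. Done by hand, the equivalent would be:

    int ret;
    u16 value;

    ret = i2c_smbus_read_word_data(client, SI7020CMD_RH_HOLD);
    if (ret < 0)
            return ret;
    value = swab16(ret);    /* what i2c_smbus_read_word_swapped() does */
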
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198cb3699..471ee36b9c6e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
216 u8 *page_addr = (u8 *) (pa & PAGE_MASK); 216 u8 *page_addr = (u8 *) (pa & PAGE_MASK);
217 dma_addr_t start_dma_addr = dma_addr; 217 dma_addr_t start_dma_addr = dma_addr;
218 unsigned long irq_flags, nr_pages, i; 218 unsigned long irq_flags, nr_pages, i;
219 unsigned long *entry;
219 int rc = 0; 220 int rc = 0;
220 221
221 if (dma_addr < s390_domain->domain.geometry.aperture_start || 222 if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
228 229
229 spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); 230 spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
230 for (i = 0; i < nr_pages; i++) { 231 for (i = 0; i < nr_pages; i++) {
231 dma_update_cpu_trans(s390_domain->dma_table, page_addr, 232 entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
232 dma_addr, flags); 233 if (!entry) {
234 rc = -ENOMEM;
235 goto undo_cpu_trans;
236 }
237 dma_update_cpu_trans(entry, page_addr, flags);
233 page_addr += PAGE_SIZE; 238 page_addr += PAGE_SIZE;
234 dma_addr += PAGE_SIZE; 239 dma_addr += PAGE_SIZE;
235 } 240 }
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
242 break; 247 break;
243 } 248 }
244 spin_unlock(&s390_domain->list_lock); 249 spin_unlock(&s390_domain->list_lock);
250
251undo_cpu_trans:
252 if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
253 flags = ZPCI_PTE_INVALID;
254 while (i-- > 0) {
255 page_addr -= PAGE_SIZE;
256 dma_addr -= PAGE_SIZE;
257 entry = dma_walk_cpu_trans(s390_domain->dma_table,
258 dma_addr);
259 if (!entry)
260 break;
261 dma_update_cpu_trans(entry, page_addr, flags);
262 }
263 }
245 spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); 264 spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
246 265
247 return rc; 266 return rc;
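
The s390-iommu change adds the unwind that was missing: if walking or updating the table fails at entry i, the i entries already written are invalidated in reverse so the table stays consistent. The generic idiom, with apply()/revert() as placeholders:

    for (i = 0; i < n; i++) {
            rc = apply(&items[i]);
            if (rc)
                    goto undo;
    }
    return 0;

    undo:
    while (i-- > 0)
            revert(&items[i]);      /* walk back over completed work */
    return rc;
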
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 44a077f3a4a2..f174ce0ca361 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs,
84 writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); 84 writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
85 85
86 /* 86 /*
87 * Disable all interrupts. Leave the PPI and SGIs alone 87 * Deactivate and disable all SPIs. Leave the PPI and SGIs
88 * as they are enabled by redistributor registers. 88 * alone as they are in the redistributor registers on GICv3.
89 */ 89 */
90 for (i = 32; i < gic_irqs; i += 32) 90 for (i = 32; i < gic_irqs; i += 32) {
91 writel_relaxed(GICD_INT_EN_CLR_X32, 91 writel_relaxed(GICD_INT_EN_CLR_X32,
92 base + GIC_DIST_ENABLE_CLEAR + i / 8); 92 base + GIC_DIST_ACTIVE_CLEAR + i / 8);
93 writel_relaxed(GICD_INT_EN_CLR_X32,
94 base + GIC_DIST_ENABLE_CLEAR + i / 8);
95 }
93 96
94 if (sync_access) 97 if (sync_access)
95 sync_access(); 98 sync_access();
@@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
102 /* 105 /*
103 * Deal with the banked PPI and SGI interrupts - disable all 106 * Deal with the banked PPI and SGI interrupts - disable all
104 * PPI interrupts, ensure all SGI interrupts are enabled. 107 * PPI interrupts, ensure all SGI interrupts are enabled.
108 * Make sure everything is deactivated.
105 */ 109 */
110 writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
106 writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); 111 writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
107 writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); 112 writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
108 113
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 515c823c1c95..abf2ffaed392 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -73,9 +73,11 @@ struct gic_chip_data {
73 union gic_base cpu_base; 73 union gic_base cpu_base;
74#ifdef CONFIG_CPU_PM 74#ifdef CONFIG_CPU_PM
75 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; 75 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
76 u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
76 u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; 77 u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
77 u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; 78 u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
78 u32 __percpu *saved_ppi_enable; 79 u32 __percpu *saved_ppi_enable;
80 u32 __percpu *saved_ppi_active;
79 u32 __percpu *saved_ppi_conf; 81 u32 __percpu *saved_ppi_conf;
80#endif 82#endif
81 struct irq_domain *domain; 83 struct irq_domain *domain;
@@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr)
566 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 568 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
567 gic_data[gic_nr].saved_spi_enable[i] = 569 gic_data[gic_nr].saved_spi_enable[i] =
568 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 570 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
571
572 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
573 gic_data[gic_nr].saved_spi_active[i] =
574 readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
569} 575}
570 576
571/* 577/*
@@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr)
604 writel_relaxed(gic_data[gic_nr].saved_spi_target[i], 610 writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
605 dist_base + GIC_DIST_TARGET + i * 4); 611 dist_base + GIC_DIST_TARGET + i * 4);
606 612
607 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 613 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
614 writel_relaxed(GICD_INT_EN_CLR_X32,
615 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
608 writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], 616 writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
609 dist_base + GIC_DIST_ENABLE_SET + i * 4); 617 dist_base + GIC_DIST_ENABLE_SET + i * 4);
618 }
619
620 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
621 writel_relaxed(GICD_INT_EN_CLR_X32,
622 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
623 writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
624 dist_base + GIC_DIST_ACTIVE_SET + i * 4);
625 }
610 626
611 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); 627 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
612} 628}
@@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr)
631 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 647 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
632 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 648 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
633 649
650 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
651 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
652 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
653
634 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 654 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
635 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 655 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
636 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 656 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr)
654 return; 674 return;
655 675
656 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 676 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
657 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 677 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
678 writel_relaxed(GICD_INT_EN_CLR_X32,
679 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
658 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); 680 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
681 }
682
683 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
684 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
685 writel_relaxed(GICD_INT_EN_CLR_X32,
686 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
687 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
688 }
659 689
660 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 690 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
661 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 691 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
@@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
710 sizeof(u32)); 740 sizeof(u32));
711 BUG_ON(!gic->saved_ppi_enable); 741 BUG_ON(!gic->saved_ppi_enable);
712 742
743 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
744 sizeof(u32));
745 BUG_ON(!gic->saved_ppi_active);
746
713 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, 747 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
714 sizeof(u32)); 748 sizeof(u32));
715 BUG_ON(!gic->saved_ppi_conf); 749 BUG_ON(!gic->saved_ppi_conf);
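
A common thread in both GIC hunks: the distributor exposes state as SET/CLEAR register pairs, and writing a saved word to the SET side can only turn bits on. The restore paths therefore wipe via the CLEAR register before replaying the snapshot; REG_SET/REG_CLEAR below are placeholders for the GIC_DIST_*_SET/_CLEAR offsets:

    /* writing saved_mask to REG_SET alone cannot clear bits that are
     * currently set in hardware but clear in the snapshot */
    writel_relaxed(0xffffffff, base + REG_CLEAR);   /* wipe current state */
    writel_relaxed(saved_mask, base + REG_SET);     /* replay snapshot */
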
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b33f53b3ca93..bf04d2a3cf4a 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
1896 ptr--; 1896 ptr--;
1897 *ptr++ = '\n'; 1897 *ptr++ = '\n';
1898 *ptr = 0; 1898 *ptr = 0;
1899 HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1899 HiSax_putstatus(cs, NULL, cs->dlog);
1900 } else 1900 } else
1901 HiSax_putstatus(cs, "LogEcho: ", 1901 HiSax_putstatus(cs, "LogEcho: ",
1902 "warning Frame too big (%d)", 1902 "warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 4a4825528188..90449e1e91e5 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
901 ptr--; 901 ptr--;
902 *ptr++ = '\n'; 902 *ptr++ = '\n';
903 *ptr = 0; 903 *ptr = 0;
904 HiSax_putstatus(cs, NULL, "%s", cs->dlog); 904 HiSax_putstatus(cs, NULL, cs->dlog);
905 } else 905 } else
906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); 906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
907 } 907 }
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index b1fad81f0722..13b2151c10f5 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
674 ptr--; 674 ptr--;
675 *ptr++ = '\n'; 675 *ptr++ = '\n';
676 *ptr = 0; 676 *ptr = 0;
677 HiSax_putstatus(cs, NULL, "%s", cs->dlog); 677 HiSax_putstatus(cs, NULL, cs->dlog);
678 } else 678 } else
679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); 679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
680 } 680 }
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index b420f8bd862e..ba4beb25d872 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
1179 dp--; 1179 dp--;
1180 *dp++ = '\n'; 1180 *dp++ = '\n';
1181 *dp = 0; 1181 *dp = 0;
1182 HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1182 HiSax_putstatus(cs, NULL, cs->dlog);
1183 } else 1183 } else
1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); 1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
1185} 1185}
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1246 } 1246 }
1247 if (finish) { 1247 if (finish) {
1248 *dp = 0; 1248 *dp = 0;
1249 HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1249 HiSax_putstatus(cs, NULL, cs->dlog);
1250 return; 1250 return;
1251 } 1251 }
1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ 1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]); 1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
1510 } 1510 }
1511 *dp = 0; 1511 *dp = 0;
1512 HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1512 HiSax_putstatus(cs, NULL, cs->dlog);
1513} 1513}
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index f659e605a406..86ce887b2ed6 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -123,6 +123,26 @@ void nvm_unregister_mgr(struct nvmm_type *mt)
123} 123}
124EXPORT_SYMBOL(nvm_unregister_mgr); 124EXPORT_SYMBOL(nvm_unregister_mgr);
125 125
 126/* register the device with a supported manager */
127static int register_mgr(struct nvm_dev *dev)
128{
129 struct nvmm_type *mt;
130 int ret = 0;
131
132 list_for_each_entry(mt, &nvm_mgrs, list) {
133 ret = mt->register_mgr(dev);
134 if (ret > 0) {
135 dev->mt = mt;
136 break; /* successfully initialized */
137 }
138 }
139
140 if (!ret)
141 pr_info("nvm: no compatible nvm manager found.\n");
142
143 return ret;
144}
145
126static struct nvm_dev *nvm_find_nvm_dev(const char *name) 146static struct nvm_dev *nvm_find_nvm_dev(const char *name)
127{ 147{
128 struct nvm_dev *dev; 148 struct nvm_dev *dev;
@@ -160,11 +180,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
160} 180}
161EXPORT_SYMBOL(nvm_erase_blk); 181EXPORT_SYMBOL(nvm_erase_blk);
162 182
163static void nvm_core_free(struct nvm_dev *dev)
164{
165 kfree(dev);
166}
167
168static int nvm_core_init(struct nvm_dev *dev) 183static int nvm_core_init(struct nvm_dev *dev)
169{ 184{
170 struct nvm_id *id = &dev->identity; 185 struct nvm_id *id = &dev->identity;
@@ -179,12 +194,21 @@ static int nvm_core_init(struct nvm_dev *dev)
179 dev->sec_size = grp->csecs; 194 dev->sec_size = grp->csecs;
180 dev->oob_size = grp->sos; 195 dev->oob_size = grp->sos;
181 dev->sec_per_pg = grp->fpg_sz / grp->csecs; 196 dev->sec_per_pg = grp->fpg_sz / grp->csecs;
182 dev->addr_mode = id->ppat; 197 memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
183 dev->addr_format = id->ppaf;
184 198
185 dev->plane_mode = NVM_PLANE_SINGLE; 199 dev->plane_mode = NVM_PLANE_SINGLE;
186 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; 200 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
187 201
202 if (grp->mtype != 0) {
203 pr_err("nvm: memory type not supported\n");
204 return -EINVAL;
205 }
206
207 if (grp->fmtype != 0 && grp->fmtype != 1) {
208 pr_err("nvm: flash type not supported\n");
209 return -EINVAL;
210 }
211
188 if (grp->mpos & 0x020202) 212 if (grp->mpos & 0x020202)
189 dev->plane_mode = NVM_PLANE_DOUBLE; 213 dev->plane_mode = NVM_PLANE_DOUBLE;
190 if (grp->mpos & 0x040404) 214 if (grp->mpos & 0x040404)
@@ -213,21 +237,17 @@ static void nvm_free(struct nvm_dev *dev)
213 237
214 if (dev->mt) 238 if (dev->mt)
215 dev->mt->unregister_mgr(dev); 239 dev->mt->unregister_mgr(dev);
216
217 nvm_core_free(dev);
218} 240}
219 241
220static int nvm_init(struct nvm_dev *dev) 242static int nvm_init(struct nvm_dev *dev)
221{ 243{
222 struct nvmm_type *mt; 244 int ret = -EINVAL;
223 int ret = 0;
224 245
225 if (!dev->q || !dev->ops) 246 if (!dev->q || !dev->ops)
226 return -EINVAL; 247 return ret;
227 248
228 if (dev->ops->identity(dev->q, &dev->identity)) { 249 if (dev->ops->identity(dev->q, &dev->identity)) {
229 pr_err("nvm: device could not be identified\n"); 250 pr_err("nvm: device could not be identified\n");
230 ret = -EINVAL;
231 goto err; 251 goto err;
232 } 252 }
233 253
@@ -251,21 +271,13 @@ static int nvm_init(struct nvm_dev *dev)
251 goto err; 271 goto err;
252 } 272 }
253 273
254 /* register with device with a supported manager */ 274 down_write(&nvm_lock);
255 list_for_each_entry(mt, &nvm_mgrs, list) { 275 ret = register_mgr(dev);
256 ret = mt->register_mgr(dev); 276 up_write(&nvm_lock);
257 if (ret < 0) 277 if (ret < 0)
258 goto err; /* initialization failed */ 278 goto err;
259 if (ret > 0) { 279 if (!ret)
260 dev->mt = mt;
261 break; /* successfully initialized */
262 }
263 }
264
265 if (!ret) {
266 pr_info("nvm: no compatible manager found.\n");
267 return 0; 280 return 0;
268 }
269 281
270 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", 282 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
271 dev->name, dev->sec_per_pg, dev->nr_planes, 283 dev->name, dev->sec_per_pg, dev->nr_planes,
@@ -273,7 +285,6 @@ static int nvm_init(struct nvm_dev *dev)
273 dev->nr_chnls); 285 dev->nr_chnls);
274 return 0; 286 return 0;
275err: 287err:
276 nvm_free(dev);
277 pr_err("nvm: failed to initialize nvm\n"); 288 pr_err("nvm: failed to initialize nvm\n");
278 return ret; 289 return ret;
279} 290}
@@ -308,22 +319,26 @@ int nvm_register(struct request_queue *q, char *disk_name,
308 if (ret) 319 if (ret)
309 goto err_init; 320 goto err_init;
310 321
311 down_write(&nvm_lock); 322 if (dev->ops->max_phys_sect > 256) {
312 list_add(&dev->devices, &nvm_devices); 323 pr_info("nvm: max sectors supported is 256.\n");
313 up_write(&nvm_lock); 324 ret = -EINVAL;
325 goto err_init;
326 }
314 327
315 if (dev->ops->max_phys_sect > 1) { 328 if (dev->ops->max_phys_sect > 1) {
316 dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, 329 dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
317 "ppalist"); 330 "ppalist");
318 if (!dev->ppalist_pool) { 331 if (!dev->ppalist_pool) {
319 pr_err("nvm: could not create ppa pool\n"); 332 pr_err("nvm: could not create ppa pool\n");
320 return -ENOMEM; 333 ret = -ENOMEM;
334 goto err_init;
321 } 335 }
322 } else if (dev->ops->max_phys_sect > 256) {
323 pr_info("nvm: max sectors supported is 256.\n");
324 return -EINVAL;
325 } 336 }
326 337
338 down_write(&nvm_lock);
339 list_add(&dev->devices, &nvm_devices);
340 up_write(&nvm_lock);
341
327 return 0; 342 return 0;
328err_init: 343err_init:
329 kfree(dev); 344 kfree(dev);
@@ -333,19 +348,22 @@ EXPORT_SYMBOL(nvm_register);
333 348
334void nvm_unregister(char *disk_name) 349void nvm_unregister(char *disk_name)
335{ 350{
336 struct nvm_dev *dev = nvm_find_nvm_dev(disk_name); 351 struct nvm_dev *dev;
337 352
353 down_write(&nvm_lock);
354 dev = nvm_find_nvm_dev(disk_name);
338 if (!dev) { 355 if (!dev) {
339 pr_err("nvm: could not find device %s to unregister\n", 356 pr_err("nvm: could not find device %s to unregister\n",
340 disk_name); 357 disk_name);
358 up_write(&nvm_lock);
341 return; 359 return;
342 } 360 }
343 361
344 nvm_exit(dev);
345
346 down_write(&nvm_lock);
347 list_del(&dev->devices); 362 list_del(&dev->devices);
348 up_write(&nvm_lock); 363 up_write(&nvm_lock);
364
365 nvm_exit(dev);
366 kfree(dev);
349} 367}
350EXPORT_SYMBOL(nvm_unregister); 368EXPORT_SYMBOL(nvm_unregister);
351 369
@@ -358,38 +376,30 @@ static int nvm_create_target(struct nvm_dev *dev,
358{ 376{
359 struct nvm_ioctl_create_simple *s = &create->conf.s; 377 struct nvm_ioctl_create_simple *s = &create->conf.s;
360 struct request_queue *tqueue; 378 struct request_queue *tqueue;
361 struct nvmm_type *mt;
362 struct gendisk *tdisk; 379 struct gendisk *tdisk;
363 struct nvm_tgt_type *tt; 380 struct nvm_tgt_type *tt;
364 struct nvm_target *t; 381 struct nvm_target *t;
365 void *targetdata; 382 void *targetdata;
366 int ret = 0; 383 int ret = 0;
367 384
385 down_write(&nvm_lock);
368 if (!dev->mt) { 386 if (!dev->mt) {
369 /* register with device with a supported NVM manager */ 387 ret = register_mgr(dev);
370 list_for_each_entry(mt, &nvm_mgrs, list) { 388 if (!ret)
371 ret = mt->register_mgr(dev); 389 ret = -ENODEV;
372 if (ret < 0) 390 if (ret < 0) {
373 return ret; /* initialization failed */ 391 up_write(&nvm_lock);
374 if (ret > 0) { 392 return ret;
375 dev->mt = mt;
376 break; /* successfully initialized */
377 }
378 }
379
380 if (!ret) {
381 pr_info("nvm: no compatible nvm manager found.\n");
382 return -ENODEV;
383 } 393 }
384 } 394 }
385 395
386 tt = nvm_find_target_type(create->tgttype); 396 tt = nvm_find_target_type(create->tgttype);
387 if (!tt) { 397 if (!tt) {
388 pr_err("nvm: target type %s not found\n", create->tgttype); 398 pr_err("nvm: target type %s not found\n", create->tgttype);
399 up_write(&nvm_lock);
389 return -EINVAL; 400 return -EINVAL;
390 } 401 }
391 402
392 down_write(&nvm_lock);
393 list_for_each_entry(t, &dev->online_targets, list) { 403 list_for_each_entry(t, &dev->online_targets, list) {
394 if (!strcmp(create->tgtname, t->disk->disk_name)) { 404 if (!strcmp(create->tgtname, t->disk->disk_name)) {
395 pr_err("nvm: target name already exists.\n"); 405 pr_err("nvm: target name already exists.\n");
@@ -457,11 +467,11 @@ static void nvm_remove_target(struct nvm_target *t)
457 lockdep_assert_held(&nvm_lock); 467 lockdep_assert_held(&nvm_lock);
458 468
459 del_gendisk(tdisk); 469 del_gendisk(tdisk);
470 blk_cleanup_queue(q);
471
460 if (tt->exit) 472 if (tt->exit)
461 tt->exit(tdisk->private_data); 473 tt->exit(tdisk->private_data);
462 474
463 blk_cleanup_queue(q);
464
465 put_disk(tdisk); 475 put_disk(tdisk);
466 476
467 list_del(&t->list); 477 list_del(&t->list);
@@ -473,7 +483,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
473 struct nvm_dev *dev; 483 struct nvm_dev *dev;
474 struct nvm_ioctl_create_simple *s; 484 struct nvm_ioctl_create_simple *s;
475 485
486 down_write(&nvm_lock);
476 dev = nvm_find_nvm_dev(create->dev); 487 dev = nvm_find_nvm_dev(create->dev);
488 up_write(&nvm_lock);
477 if (!dev) { 489 if (!dev) {
478 pr_err("nvm: device not found\n"); 490 pr_err("nvm: device not found\n");
479 return -EINVAL; 491 return -EINVAL;
@@ -532,7 +544,9 @@ static int nvm_configure_show(const char *val)
532 return -EINVAL; 544 return -EINVAL;
533 } 545 }
534 546
547 down_write(&nvm_lock);
535 dev = nvm_find_nvm_dev(devname); 548 dev = nvm_find_nvm_dev(devname);
549 up_write(&nvm_lock);
536 if (!dev) { 550 if (!dev) {
537 pr_err("nvm: device not found\n"); 551 pr_err("nvm: device not found\n");
538 return -EINVAL; 552 return -EINVAL;
@@ -541,7 +555,7 @@ static int nvm_configure_show(const char *val)
541 if (!dev->mt) 555 if (!dev->mt)
542 return 0; 556 return 0;
543 557
544 dev->mt->free_blocks_print(dev); 558 dev->mt->lun_info_print(dev);
545 559
546 return 0; 560 return 0;
547} 561}
@@ -677,8 +691,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
677 info->tgtsize = tgt_iter; 691 info->tgtsize = tgt_iter;
678 up_write(&nvm_lock); 692 up_write(&nvm_lock);
679 693
680 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) 694 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
695 kfree(info);
681 return -EFAULT; 696 return -EFAULT;
697 }
682 698
683 kfree(info); 699 kfree(info);
684 return 0; 700 return 0;
@@ -721,8 +737,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
721 737
722 devices->nr_devices = i; 738 devices->nr_devices = i;
723 739
724 if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices))) 740 if (copy_to_user(arg, devices,
741 sizeof(struct nvm_ioctl_get_devices))) {
742 kfree(devices);
725 return -EFAULT; 743 return -EFAULT;
744 }
726 745
727 kfree(devices); 746 kfree(devices);
728 return 0; 747 return 0;
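
Both lightnvm ioctl paths above previously leaked their reply buffer when the copy-out faulted; the fix frees it on that branch as well. The leak-free tail, with fill_info() as a hypothetical stand-in for building the reply:

    info = kmalloc(sizeof(*info), GFP_KERNEL);
    if (!info)
            return -ENOMEM;

    fill_info(info);

    if (copy_to_user(arg, info, sizeof(*info))) {
            kfree(info);            /* previously leaked on this path */
            return -EFAULT;
    }
    kfree(info);
    return 0;
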
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index ae1fb2bdc5f4..35dde84b71e9 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,23 +60,27 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
60 lun->vlun.lun_id = i % dev->luns_per_chnl; 60 lun->vlun.lun_id = i % dev->luns_per_chnl;
61 lun->vlun.chnl_id = i / dev->luns_per_chnl; 61 lun->vlun.chnl_id = i / dev->luns_per_chnl;
62 lun->vlun.nr_free_blocks = dev->blks_per_lun; 62 lun->vlun.nr_free_blocks = dev->blks_per_lun;
63 lun->vlun.nr_inuse_blocks = 0;
64 lun->vlun.nr_bad_blocks = 0;
63 } 65 }
64 return 0; 66 return 0;
65} 67}
66 68
67static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, 69static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
68 void *private) 70 void *private)
69{ 71{
70 struct gen_nvm *gn = private; 72 struct gen_nvm *gn = private;
71 struct gen_lun *lun = &gn->luns[lun_id]; 73 struct nvm_dev *dev = gn->dev;
74 struct gen_lun *lun;
72 struct nvm_block *blk; 75 struct nvm_block *blk;
73 int i; 76 int i;
74 77
75 if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) 78 lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
76 return 0; 79
80 for (i = 0; i < nr_blocks; i++) {
81 if (blks[i] == 0)
82 continue;
77 83
78 i = -1;
79 while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
80 blk = &lun->vlun.blocks[i]; 84 blk = &lun->vlun.blocks[i];
81 if (!blk) { 85 if (!blk) {
82 pr_err("gennvm: BB data is out of bounds.\n"); 86 pr_err("gennvm: BB data is out of bounds.\n");
@@ -84,6 +88,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
84 } 88 }
85 89
86 list_move_tail(&blk->list, &lun->bb_list); 90 list_move_tail(&blk->list, &lun->bb_list);
91 lun->vlun.nr_bad_blocks++;
87 } 92 }
88 93
89 return 0; 94 return 0;
@@ -136,6 +141,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
136 list_move_tail(&blk->list, &lun->used_list); 141 list_move_tail(&blk->list, &lun->used_list);
137 blk->type = 1; 142 blk->type = 1;
138 lun->vlun.nr_free_blocks--; 143 lun->vlun.nr_free_blocks--;
144 lun->vlun.nr_inuse_blocks++;
139 } 145 }
140 } 146 }
141 147
@@ -164,15 +170,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
164 block->id = cur_block_id++; 170 block->id = cur_block_id++;
165 171
166 /* First block is reserved for device */ 172 /* First block is reserved for device */
167 if (unlikely(lun_iter == 0 && blk_iter == 0)) 173 if (unlikely(lun_iter == 0 && blk_iter == 0)) {
174 lun->vlun.nr_free_blocks--;
168 continue; 175 continue;
176 }
169 177
170 list_add_tail(&block->list, &lun->free_list); 178 list_add_tail(&block->list, &lun->free_list);
171 } 179 }
172 180
173 if (dev->ops->get_bb_tbl) { 181 if (dev->ops->get_bb_tbl) {
174 ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, 182 struct ppa_addr ppa;
175 dev->blks_per_lun, gennvm_block_bb, gn); 183
184 ppa.ppa = 0;
185 ppa.g.ch = lun->vlun.chnl_id;
186 ppa.g.lun = lun->vlun.id;
187 ppa = generic_to_dev_addr(dev, ppa);
188
189 ret = dev->ops->get_bb_tbl(dev, ppa,
190 dev->blks_per_lun,
191 gennvm_block_bb, gn);
176 if (ret) 192 if (ret)
177 pr_err("gennvm: could not read BB table\n"); 193 pr_err("gennvm: could not read BB table\n");
178 } 194 }
@@ -190,6 +206,14 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
190 return 0; 206 return 0;
191} 207}
192 208
209static void gennvm_free(struct nvm_dev *dev)
210{
211 gennvm_blocks_free(dev);
212 gennvm_luns_free(dev);
213 kfree(dev->mp);
214 dev->mp = NULL;
215}
216
193static int gennvm_register(struct nvm_dev *dev) 217static int gennvm_register(struct nvm_dev *dev)
194{ 218{
195 struct gen_nvm *gn; 219 struct gen_nvm *gn;
@@ -199,6 +223,7 @@ static int gennvm_register(struct nvm_dev *dev)
199 if (!gn) 223 if (!gn)
200 return -ENOMEM; 224 return -ENOMEM;
201 225
226 gn->dev = dev;
202 gn->nr_luns = dev->nr_luns; 227 gn->nr_luns = dev->nr_luns;
203 dev->mp = gn; 228 dev->mp = gn;
204 229
@@ -216,16 +241,13 @@ static int gennvm_register(struct nvm_dev *dev)
216 241
217 return 1; 242 return 1;
218err: 243err:
219 kfree(gn); 244 gennvm_free(dev);
220 return ret; 245 return ret;
221} 246}
222 247
223static void gennvm_unregister(struct nvm_dev *dev) 248static void gennvm_unregister(struct nvm_dev *dev)
224{ 249{
225 gennvm_blocks_free(dev); 250 gennvm_free(dev);
226 gennvm_luns_free(dev);
227 kfree(dev->mp);
228 dev->mp = NULL;
229} 251}
230 252
231static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, 253static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
@@ -254,6 +276,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
254 blk->type = 1; 276 blk->type = 1;
255 277
256 lun->vlun.nr_free_blocks--; 278 lun->vlun.nr_free_blocks--;
279 lun->vlun.nr_inuse_blocks++;
257 280
258 spin_unlock(&vlun->lock); 281 spin_unlock(&vlun->lock);
259out: 282out:
@@ -271,16 +294,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
271 case 1: 294 case 1:
272 list_move_tail(&blk->list, &lun->free_list); 295 list_move_tail(&blk->list, &lun->free_list);
273 lun->vlun.nr_free_blocks++; 296 lun->vlun.nr_free_blocks++;
297 lun->vlun.nr_inuse_blocks--;
274 blk->type = 0; 298 blk->type = 0;
275 break; 299 break;
276 case 2: 300 case 2:
277 list_move_tail(&blk->list, &lun->bb_list); 301 list_move_tail(&blk->list, &lun->bb_list);
302 lun->vlun.nr_bad_blocks++;
303 lun->vlun.nr_inuse_blocks--;
278 break; 304 break;
279 default: 305 default:
280 WARN_ON_ONCE(1); 306 WARN_ON_ONCE(1);
281 pr_err("gennvm: erroneous block type (%lu -> %u)\n", 307 pr_err("gennvm: erroneous block type (%lu -> %u)\n",
282 blk->id, blk->type); 308 blk->id, blk->type);
283 list_move_tail(&blk->list, &lun->bb_list); 309 list_move_tail(&blk->list, &lun->bb_list);
310 lun->vlun.nr_bad_blocks++;
311 lun->vlun.nr_inuse_blocks--;
284 } 312 }
285 313
286 spin_unlock(&vlun->lock); 314 spin_unlock(&vlun->lock);
@@ -292,10 +320,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
292 320
293 if (rqd->nr_pages > 1) { 321 if (rqd->nr_pages > 1) {
294 for (i = 0; i < rqd->nr_pages; i++) 322 for (i = 0; i < rqd->nr_pages; i++)
295 rqd->ppa_list[i] = addr_to_generic_mode(dev, 323 rqd->ppa_list[i] = dev_to_generic_addr(dev,
296 rqd->ppa_list[i]); 324 rqd->ppa_list[i]);
297 } else { 325 } else {
298 rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); 326 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
299 } 327 }
300} 328}
301 329
@@ -305,10 +333,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
305 333
306 if (rqd->nr_pages > 1) { 334 if (rqd->nr_pages > 1) {
307 for (i = 0; i < rqd->nr_pages; i++) 335 for (i = 0; i < rqd->nr_pages; i++)
308 rqd->ppa_list[i] = generic_to_addr_mode(dev, 336 rqd->ppa_list[i] = generic_to_dev_addr(dev,
309 rqd->ppa_list[i]); 337 rqd->ppa_list[i]);
310 } else { 338 } else {
311 rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); 339 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
312 } 340 }
313} 341}
314 342
@@ -354,10 +382,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
354{ 382{
355 int i; 383 int i;
356 384
357 if (!dev->ops->set_bb) 385 if (!dev->ops->set_bb_tbl)
358 return; 386 return;
359 387
360 if (dev->ops->set_bb(dev->q, rqd, 1)) 388 if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
361 return; 389 return;
362 390
363 gennvm_addr_to_generic_mode(dev, rqd); 391 gennvm_addr_to_generic_mode(dev, rqd);
@@ -440,15 +468,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
440 return &gn->luns[lunid].vlun; 468 return &gn->luns[lunid].vlun;
441} 469}
442 470
443static void gennvm_free_blocks_print(struct nvm_dev *dev) 471static void gennvm_lun_info_print(struct nvm_dev *dev)
444{ 472{
445 struct gen_nvm *gn = dev->mp; 473 struct gen_nvm *gn = dev->mp;
446 struct gen_lun *lun; 474 struct gen_lun *lun;
447 unsigned int i; 475 unsigned int i;
448 476
449 gennvm_for_each_lun(gn, lun, i) 477
450 pr_info("%s: lun%8u\t%u\n", 478 gennvm_for_each_lun(gn, lun, i) {
451 dev->name, i, lun->vlun.nr_free_blocks); 479 spin_lock(&lun->vlun.lock);
480
481 pr_info("%s: lun%8u\t%u\t%u\t%u\n",
482 dev->name, i,
483 lun->vlun.nr_free_blocks,
484 lun->vlun.nr_inuse_blocks,
485 lun->vlun.nr_bad_blocks);
486
487 spin_unlock(&lun->vlun.lock);
488 }
452} 489}
453 490
454static struct nvmm_type gennvm = { 491static struct nvmm_type gennvm = {
@@ -466,7 +503,7 @@ static struct nvmm_type gennvm = {
466 .erase_blk = gennvm_erase_blk, 503 .erase_blk = gennvm_erase_blk,
467 504
468 .get_lun = gennvm_get_lun, 505 .get_lun = gennvm_get_lun,
469 .free_blocks_print = gennvm_free_blocks_print, 506 .lun_info_print = gennvm_lun_info_print,
470}; 507};
471 508
472static int __init gennvm_module_init(void) 509static int __init gennvm_module_init(void)
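The gennvm hunks above thread one accounting invariant through every state change: a block is always counted in exactly one of nr_free_blocks, nr_inuse_blocks or nr_bad_blocks, which is what the reworked gennvm_lun_info_print() reports under the LUN lock. A minimal userspace sketch of that invariant, with made-up types rather than the kernel's struct nvm_lun:

	#include <assert.h>

	struct lun_counts {
		unsigned int free, inuse, bad, total;
	};

	/* free -> in-use, as in gennvm_get_blk() / gennvm_block_map() */
	static void take_block(struct lun_counts *c)
	{
		assert(c->free > 0);
		c->free--;
		c->inuse++;
	}

	/* in-use -> free or bad, as in gennvm_put_blk() */
	static void put_block(struct lun_counts *c, int bad)
	{
		assert(c->inuse > 0);
		c->inuse--;
		if (bad)
			c->bad++;
		else
			c->free++;
	}

	/* the invariant the new counters make checkable */
	static void check(const struct lun_counts *c)
	{
		assert(c->free + c->inuse + c->bad == c->total);
	}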
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index d23bd3501ddc..9c24b5b32dac 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -35,6 +35,8 @@ struct gen_lun {
35}; 35};
36 36
37struct gen_nvm { 37struct gen_nvm {
38 struct nvm_dev *dev;
39
38 int nr_luns; 40 int nr_luns;
39 struct gen_lun *luns; 41 struct gen_lun *luns;
40}; 42};
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 7ba64c87ba1c..75e59c3a3f96 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
123 return blk->id * rrpc->dev->pgs_per_blk; 123 return blk->id * rrpc->dev->pgs_per_blk;
124} 124}
125 125
126static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
127 struct ppa_addr r)
128{
129 struct ppa_addr l;
130 int secs, pgs, blks, luns;
131 sector_t ppa = r.ppa;
132
133 l.ppa = 0;
134
135 div_u64_rem(ppa, dev->sec_per_pg, &secs);
136 l.g.sec = secs;
137
138 sector_div(ppa, dev->sec_per_pg);
139 div_u64_rem(ppa, dev->sec_per_blk, &pgs);
140 l.g.pg = pgs;
141
142 sector_div(ppa, dev->pgs_per_blk);
143 div_u64_rem(ppa, dev->blks_per_lun, &blks);
144 l.g.blk = blks;
145
146 sector_div(ppa, dev->blks_per_lun);
147 div_u64_rem(ppa, dev->luns_per_chnl, &luns);
148 l.g.lun = luns;
149
150 sector_div(ppa, dev->luns_per_chnl);
151 l.g.ch = ppa;
152
153 return l;
154}
155
126static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) 156static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
127{ 157{
128 struct ppa_addr paddr; 158 struct ppa_addr paddr;
129 159
130 paddr.ppa = addr; 160 paddr.ppa = addr;
131 return __linear_to_generic_addr(dev, paddr); 161 return linear_to_generic_addr(dev, paddr);
132} 162}
133 163
134/* requires lun->lock taken */ 164/* requires lun->lock taken */
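The new linear_to_generic_addr() is a mixed-radix split of a flat sector number into (channel, lun, block, page, sector) fields; div_u64_rem()/sector_div() are the kernel's 64-bit-safe remainder/divide helpers. A standalone sketch of the same idea with invented geometry values, not the driver's exact radices:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical geometry, for illustration only */
		const uint64_t sec_per_pg = 4, pgs_per_blk = 256,
			       blks_per_lun = 1024, luns_per_chnl = 8;
		uint64_t ppa = 123456789, sec, pg, blk, lun, ch;

		sec = ppa % sec_per_pg;    ppa /= sec_per_pg;
		pg  = ppa % pgs_per_blk;   ppa /= pgs_per_blk;
		blk = ppa % blks_per_lun;  ppa /= blks_per_lun;
		lun = ppa % luns_per_chnl; ppa /= luns_per_chnl;
		ch  = ppa; /* whatever remains selects the channel */

		printf("ch=%" PRIu64 " lun=%" PRIu64 " blk=%" PRIu64
		       " pg=%" PRIu64 " sec=%" PRIu64 "\n",
		       ch, lun, blk, pg, sec);
		return 0;
	}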
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 917d47e290ae..3147c8d09ea8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -112,7 +112,8 @@ struct iv_tcw_private {
112 * and encrypts / decrypts at the same time. 112 * and encrypts / decrypts at the same time.
113 */ 113 */
114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
116 DM_CRYPT_EXIT_THREAD};
116 117
117/* 118/*
118 * The fields in here must be read only after initialization. 119 * The fields in here must be read only after initialization.
@@ -1203,20 +1204,18 @@ continue_locked:
1203 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1204 if (!RB_EMPTY_ROOT(&cc->write_tree))
1204 goto pop_from_list; 1205 goto pop_from_list;
1205 1206
1207 if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1208 spin_unlock_irq(&cc->write_thread_wait.lock);
1209 break;
1210 }
1211
1206 __set_current_state(TASK_INTERRUPTIBLE); 1212 __set_current_state(TASK_INTERRUPTIBLE);
1207 __add_wait_queue(&cc->write_thread_wait, &wait); 1213 __add_wait_queue(&cc->write_thread_wait, &wait);
1208 1214
1209 spin_unlock_irq(&cc->write_thread_wait.lock); 1215 spin_unlock_irq(&cc->write_thread_wait.lock);
1210 1216
1211 if (unlikely(kthread_should_stop())) {
1212 set_task_state(current, TASK_RUNNING);
1213 remove_wait_queue(&cc->write_thread_wait, &wait);
1214 break;
1215 }
1216
1217 schedule(); 1217 schedule();
1218 1218
1219 set_task_state(current, TASK_RUNNING);
1220 spin_lock_irq(&cc->write_thread_wait.lock); 1219 spin_lock_irq(&cc->write_thread_wait.lock);
1221 __remove_wait_queue(&cc->write_thread_wait, &wait); 1220 __remove_wait_queue(&cc->write_thread_wait, &wait);
1222 goto continue_locked; 1221 goto continue_locked;
@@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
1531 if (!cc) 1530 if (!cc)
1532 return; 1531 return;
1533 1532
1534 if (cc->write_thread) 1533 if (cc->write_thread) {
1534 spin_lock_irq(&cc->write_thread_wait.lock);
1535 set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1536 wake_up_locked(&cc->write_thread_wait);
1537 spin_unlock_irq(&cc->write_thread_wait.lock);
1535 kthread_stop(cc->write_thread); 1538 kthread_stop(cc->write_thread);
1539 }
1536 1540
1537 if (cc->io_queue) 1541 if (cc->io_queue)
1538 destroy_workqueue(cc->io_queue); 1542 destroy_workqueue(cc->io_queue);
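The dm-crypt change fixes a lost-wakeup shutdown race: kthread_should_stop() was tested after dropping write_thread_wait.lock, so a kthread_stop() issued in that window could go unnoticed and leave the writer asleep. The new DM_CRYPT_EXIT_THREAD flag is set and signalled under the same lock the sleeper holds. A userspace pthread analogue of the pattern, a sketch rather than the driver code:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
	static bool exit_thread;

	static void *writer(void *arg)
	{
		pthread_mutex_lock(&lock);
		for (;;) {
			/* flag checked with the lock held, like the
			 * DM_CRYPT_EXIT_THREAD test: no window for a
			 * missed wakeup */
			if (exit_thread)
				break;
			pthread_cond_wait(&wake, &lock); /* unlock + sleep atomically */
		}
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, writer, NULL);

		pthread_mutex_lock(&lock);
		exit_thread = true;		/* set the flag... */
		pthread_cond_signal(&wake);	/* ...and wake, both under the lock */
		pthread_mutex_unlock(&lock);

		pthread_join(t, NULL);
		return 0;
	}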
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aaa6caa46a9f..cfa29f574c2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
1537 struct block_device **bdev, fmode_t *mode) 1537 struct block_device **bdev, fmode_t *mode)
1538{ 1538{
1539 struct multipath *m = ti->private; 1539 struct multipath *m = ti->private;
1540 struct pgpath *pgpath;
1541 unsigned long flags; 1540 unsigned long flags;
1542 int r; 1541 int r;
1543 1542
1544 r = 0;
1545
1546 spin_lock_irqsave(&m->lock, flags); 1543 spin_lock_irqsave(&m->lock, flags);
1547 1544
1548 if (!m->current_pgpath) 1545 if (!m->current_pgpath)
1549 __choose_pgpath(m, 0); 1546 __choose_pgpath(m, 0);
1550 1547
1551 pgpath = m->current_pgpath; 1548 if (m->current_pgpath) {
1552 1549 if (!m->queue_io) {
1553 if (pgpath) { 1550 *bdev = m->current_pgpath->path.dev->bdev;
1554 *bdev = pgpath->path.dev->bdev; 1551 *mode = m->current_pgpath->path.dev->mode;
1555 *mode = pgpath->path.dev->mode; 1552 r = 0;
1553 } else {
1554 /* pg_init has not started or completed */
1555 r = -ENOTCONN;
1556 }
1557 } else {
1558 /* No path is available */
1559 if (m->queue_if_no_path)
1560 r = -ENOTCONN;
1561 else
1562 r = -EIO;
1556 } 1563 }
1557 1564
1558 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1559 r = -ENOTCONN;
1560 else if (!*bdev)
1561 r = -EIO;
1562
1563 spin_unlock_irqrestore(&m->lock, flags); 1565 spin_unlock_irqrestore(&m->lock, flags);
1564 1566
1565 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 1567 if (r == -ENOTCONN) {
1566 spin_lock_irqsave(&m->lock, flags); 1568 spin_lock_irqsave(&m->lock, flags);
1567 if (!m->current_pg) { 1569 if (!m->current_pg) {
1568 /* Path status changed, redo selection */ 1570 /* Path status changed, redo selection */
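The rewritten multipath_prepare_ioctl() separates "retry later" from "hard failure": -ENOTCONN when a path exists but pg_init is still in flight, or when no path exists but queue_if_no_path is set; -EIO only when no path exists and queueing is off. Condensed as a standalone decision helper, a sketch and not the driver function:

	#include <errno.h>
	#include <stdbool.h>

	static int mpath_ioctl_rc(bool have_path, bool queue_io,
				  bool queue_if_no_path)
	{
		if (have_path)
			return queue_io ? -ENOTCONN /* pg_init pending */ : 0;
		return queue_if_no_path ? -ENOTCONN /* retryable */ : -EIO;
	}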
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 3897b90bd462..63903a5a5d9e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2432 case PM_WRITE: 2432 case PM_WRITE:
2433 if (old_mode != new_mode) 2433 if (old_mode != new_mode)
2434 notify_of_pool_mode_change(pool, "write"); 2434 notify_of_pool_mode_change(pool, "write");
2435 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2435 dm_pool_metadata_read_write(pool->pmd); 2436 dm_pool_metadata_read_write(pool->pmd);
2436 pool->process_bio = process_bio; 2437 pool->process_bio = process_bio;
2437 pool->process_discard = process_discard_bio; 2438 pool->process_discard = process_discard_bio;
@@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4250{
4250 struct thin_c *tc = ti->private; 4251 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4252 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253 4253
4254 if (!pool_limits->discard_granularity) 4254 if (!pool->pf.discard_enabled)
4255 return; /* pool's discard support is disabled */ 4255 return;
4256 4256
4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
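Two thin-pool fixes above: re-entering PM_WRITE now restores the table-requested error_if_no_space setting instead of keeping whatever a degraded mode left behind, and discard hints are derived from the pool's own feature flag rather than the pool device's queue limits. The latter, condensed from the hunk shown:

	if (!pool->pf.discard_enabled)
		return;	/* feature off: leave the target's limits alone */
	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;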
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e15f3565892..5df40480228b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -591,7 +591,7 @@ retry:
591 591
592out: 592out:
593 dm_put_live_table(md, *srcu_idx); 593 dm_put_live_table(md, *srcu_idx);
594 if (r == -ENOTCONN) { 594 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
595 msleep(10); 595 msleep(10);
596 goto retry; 596 goto retry;
597 } 597 }
@@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
603{ 603{
604 struct mapped_device *md = bdev->bd_disk->private_data; 604 struct mapped_device *md = bdev->bd_disk->private_data;
605 struct dm_target *tgt; 605 struct dm_target *tgt;
606 struct block_device *tgt_bdev = NULL;
606 int srcu_idx, r; 607 int srcu_idx, r;
607 608
608 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); 609 r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
609 if (r < 0) 610 if (r < 0)
610 return r; 611 return r;
611 612
@@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
620 goto out; 621 goto out;
621 } 622 }
622 623
623 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 624 r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
624out: 625out:
625 dm_put_live_table(md, srcu_idx); 626 dm_put_live_table(md, srcu_idx);
626 return r; 627 return r;
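The dm.c hunks pair two fixes: the -ENOTCONN retry loop now gives up once a fatal signal is pending, so a killed task cannot spin in msleep() forever, and dm_blk_ioctl() stops clobbering its own bdev argument; the live-table lookup writes into a separate tgt_bdev, which is then the device the ioctl is forwarded to. The retry idiom, condensed (try_get_table() is a hypothetical stand-in for dm_get_live_table_for_ioctl()):

	for (;;) {
		r = try_get_table();	/* hypothetical helper */
		if (r != -ENOTCONN || fatal_signal_pending(current))
			break;		/* hard error, success, or dying task */
		msleep(10);		/* back off, then retry */
	}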
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 35759a91d47d..e8f847226a19 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
1992 (unsigned long long)pci_resource_start(pci_dev, 0)); 1992 (unsigned long long)pci_resource_start(pci_dev, 0));
1993 1993
1994 pci_set_master(pci_dev); 1994 pci_set_master(pci_dev);
1995 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1995 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1996 if (err) {
1996 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1997 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1997 err = -EIO;
1998 goto fail_context; 1998 goto fail_context;
1999 } 1999 }
2000 2000
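The cx23885 hunk above is the first of a series of identical fixes below (cx25821, cx88, saa7134, saa7164, tw68): pci_set_dma_mask() returns 0 on success, so the old `if (!pci_set_dma_mask(...))` branched into the error path exactly when the call had succeeded. The corrected shape, as used in the hunks (netup_unidvb opts for the equivalent `< 0` test instead):

	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if (err) {	/* non-zero return: 32-bit DMA not available */
		pr_err("%s: no 32bit PCI DMA\n", dev->name);
		goto fail;
	}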
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index dbc695f32760..0042803a9de7 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
1319 dev->pci_lat, (unsigned long long)dev->base_io_addr); 1319 dev->pci_lat, (unsigned long long)dev->base_io_addr);
1320 1320
1321 pci_set_master(pci_dev); 1321 pci_set_master(pci_dev);
1322 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1322 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1323 if (err) {
1323 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1324 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1324 err = -EIO; 1325 err = -EIO;
1325 goto fail_irq; 1326 goto fail_irq;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 0ed1b6530374..1b5268f9bb24 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
890 return err; 890 return err;
891 } 891 }
892 892
893 if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) { 893 err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
894 if (err) {
894 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); 895 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
895 err = -EIO;
896 cx88_core_put(core, pci); 896 cx88_core_put(core, pci);
897 return err; 897 return err;
898 } 898 }
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 9db7767d1fe0..f34c229f9b37 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
393 if (pci_enable_device(dev->pci)) 393 if (pci_enable_device(dev->pci))
394 return -EIO; 394 return -EIO;
395 pci_set_master(dev->pci); 395 pci_set_master(dev->pci);
396 if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) { 396 err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
397 if (err) {
397 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); 398 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
398 return -EIO; 399 return -EIO;
399 } 400 }
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 0de1ad5a977d..aef9acf351f6 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
1315 1315
1316 pci_set_master(pci_dev); 1316 pci_set_master(pci_dev);
1317 if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) { 1317 err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
1318 if (err) {
1318 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); 1319 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
1319 err = -EIO;
1320 goto fail_core; 1320 goto fail_core;
1321 } 1321 }
1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); 1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60b2d462f98d..3fdbd81b5580 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
810 "%s(): board vendor 0x%x, revision 0x%x\n", 810 "%s(): board vendor 0x%x, revision 0x%x\n",
811 __func__, board_vendor, board_revision); 811 __func__, board_vendor, board_revision);
812 pci_set_master(pci_dev); 812 pci_set_master(pci_dev);
813 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 813 if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
814 dev_err(&pci_dev->dev, 814 dev_err(&pci_dev->dev,
815 "%s(): 32bit PCI DMA is not supported\n", __func__); 815 "%s(): 32bit PCI DMA is not supported\n", __func__);
816 goto pci_detect_err; 816 goto pci_detect_err;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e79d63eb774e..f720cea80e28 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
953 pci_set_master(pci_dev); 953 pci_set_master(pci_dev);
954 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 954 err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
955 if (err) {
955 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 956 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
956 err = -EIO;
957 goto fail1; 957 goto fail1;
958 } 958 }
959 959
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8f36b48ef733..8bbd092fbe1d 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
1264 1264
1265 pci_set_master(pci_dev); 1265 pci_set_master(pci_dev);
1266 /* TODO */ 1266 /* TODO */
1267 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1267 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1268 if (err) {
1268 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1269 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1269 err = -EIO;
1270 goto fail_irq; 1270 goto fail_irq;
1271 } 1271 }
1272 1272
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 8c5655d351d3..4e77618fbb2b 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); 258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
259 pci_set_master(pci_dev); 259 pci_set_master(pci_dev);
260 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 260 err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
261 if (err) {
261 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 262 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
262 err = -EIO;
263 goto fail1; 263 goto fail1;
264 } 264 }
265 265
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 23b6c8e8701c..d8486168415a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block");
65#define MMC_SANITIZE_REQ_TIMEOUT 240000 65#define MMC_SANITIZE_REQ_TIMEOUT 240000
66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
67 67
68#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ 68#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
69 (req->cmd_flags & REQ_META)) && \
70 (rq_data_dir(req) == WRITE)) 69 (rq_data_dir(req) == WRITE))
71#define PACKED_CMD_VER 0x01 70#define PACKED_CMD_VER 0x01
72#define PACKED_CMD_WR 0x02 71#define PACKED_CMD_WR 0x02
@@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1467 1466
1468 /* 1467 /*
1469 * Reliable writes are used to implement Forced Unit Access and 1468 * Reliable writes are used to implement Forced Unit Access and
1470 * REQ_META accesses, and are supported only on MMCs. 1469 * are supported only on MMCs.
1471 *
1472 * XXX: this really needs a good explanation of why REQ_META
1473 * is treated special.
1474 */ 1470 */
1475 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 1471 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1476 (req->cmd_flags & REQ_META)) &&
1477 (rq_data_dir(req) == WRITE) && 1472 (rq_data_dir(req) == WRITE) &&
1478 (md->flags & MMC_BLK_REL_WR); 1473 (md->flags & MMC_BLK_REL_WR);
1479 1474
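After dropping the REQ_META special case, the reliable-write predicate reduces to FUA writes on cards advertising the feature. As a standalone boolean using the kernel's flag names, a sketch only; the driver keeps it as the mmc_req_rel_wr() macro shown above:

	static bool is_reliable_write(unsigned int cmd_flags, bool is_write,
				      unsigned int md_flags)
	{
		return (cmd_flags & REQ_FUA) && is_write &&
		       (md_flags & MMC_BLK_REL_WR);
	}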
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c793fda27321..3a9a79ec4343 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
1040 return err; 1040 return err;
1041} 1041}
1042 1042
1043/* Caller must hold re-tuning */
1044static int mmc_switch_status(struct mmc_card *card)
1045{
1046 u32 status;
1047 int err;
1048
1049 err = mmc_send_status(card, &status);
1050 if (err)
1051 return err;
1052
1053 return mmc_switch_status_error(card->host, status);
1054}
1055
1043static int mmc_select_hs400(struct mmc_card *card) 1056static int mmc_select_hs400(struct mmc_card *card)
1044{ 1057{
1045 struct mmc_host *host = card->host; 1058 struct mmc_host *host = card->host;
1059 bool send_status = true;
1060 unsigned int max_dtr;
1046 int err = 0; 1061 int err = 0;
1047 u8 val; 1062 u8 val;
1048 1063
@@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card)
1053 host->ios.bus_width == MMC_BUS_WIDTH_8)) 1068 host->ios.bus_width == MMC_BUS_WIDTH_8))
1054 return 0; 1069 return 0;
1055 1070
1056 /* 1071 if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
1057 * Before switching to dual data rate operation for HS400, 1072 send_status = false;
1058 * it is required to convert from HS200 mode to HS mode.
1059 */
1060 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1061 mmc_set_bus_speed(card);
1062 1073
1074 /* Reduce frequency to HS frequency */
1075 max_dtr = card->ext_csd.hs_max_dtr;
1076 mmc_set_clock(host, max_dtr);
1077
1078 /* Switch card to HS mode */
1063 val = EXT_CSD_TIMING_HS | 1079 val = EXT_CSD_TIMING_HS |
1064 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1080 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1065 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1081 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1066 EXT_CSD_HS_TIMING, val, 1082 EXT_CSD_HS_TIMING, val,
1067 card->ext_csd.generic_cmd6_time, 1083 card->ext_csd.generic_cmd6_time,
1068 true, true, true); 1084 true, send_status, true);
1069 if (err) { 1085 if (err) {
1070 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", 1086 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1071 mmc_hostname(host), err); 1087 mmc_hostname(host), err);
1072 return err; 1088 return err;
1073 } 1089 }
1074 1090
1091 /* Set host controller to HS timing */
1092 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1093
1094 if (!send_status) {
1095 err = mmc_switch_status(card);
1096 if (err)
1097 goto out_err;
1098 }
1099
1100 /* Switch card to DDR */
1075 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1101 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1076 EXT_CSD_BUS_WIDTH, 1102 EXT_CSD_BUS_WIDTH,
1077 EXT_CSD_DDR_BUS_WIDTH_8, 1103 EXT_CSD_DDR_BUS_WIDTH_8,
@@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card)
1082 return err; 1108 return err;
1083 } 1109 }
1084 1110
1111 /* Switch card to HS400 */
1085 val = EXT_CSD_TIMING_HS400 | 1112 val = EXT_CSD_TIMING_HS400 |
1086 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1113 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1087 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1114 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1088 EXT_CSD_HS_TIMING, val, 1115 EXT_CSD_HS_TIMING, val,
1089 card->ext_csd.generic_cmd6_time, 1116 card->ext_csd.generic_cmd6_time,
1090 true, true, true); 1117 true, send_status, true);
1091 if (err) { 1118 if (err) {
1092 pr_err("%s: switch to hs400 failed, err:%d\n", 1119 pr_err("%s: switch to hs400 failed, err:%d\n",
1093 mmc_hostname(host), err); 1120 mmc_hostname(host), err);
1094 return err; 1121 return err;
1095 } 1122 }
1096 1123
1124 /* Set host controller to HS400 timing and frequency */
1097 mmc_set_timing(host, MMC_TIMING_MMC_HS400); 1125 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1098 mmc_set_bus_speed(card); 1126 mmc_set_bus_speed(card);
1099 1127
1128 if (!send_status) {
1129 err = mmc_switch_status(card);
1130 if (err)
1131 goto out_err;
1132 }
1133
1100 return 0; 1134 return 0;
1135
1136out_err:
1137 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1138 __func__, err);
1139 return err;
1101} 1140}
1102 1141
1103int mmc_hs200_to_hs400(struct mmc_card *card) 1142int mmc_hs200_to_hs400(struct mmc_card *card)
@@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
1105 return mmc_select_hs400(card); 1144 return mmc_select_hs400(card);
1106} 1145}
1107 1146
1108/* Caller must hold re-tuning */
1109static int mmc_switch_status(struct mmc_card *card)
1110{
1111 u32 status;
1112 int err;
1113
1114 err = mmc_send_status(card, &status);
1115 if (err)
1116 return err;
1117
1118 return mmc_switch_status_error(card->host, status);
1119}
1120
1121int mmc_hs400_to_hs200(struct mmc_card *card) 1147int mmc_hs400_to_hs200(struct mmc_card *card)
1122{ 1148{
1123 struct mmc_host *host = card->host; 1149 struct mmc_host *host = card->host;
@@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card)
1219static int mmc_select_hs200(struct mmc_card *card) 1245static int mmc_select_hs200(struct mmc_card *card)
1220{ 1246{
1221 struct mmc_host *host = card->host; 1247 struct mmc_host *host = card->host;
1248 bool send_status = true;
1249 unsigned int old_timing;
1222 int err = -EINVAL; 1250 int err = -EINVAL;
1223 u8 val; 1251 u8 val;
1224 1252
@@ -1234,6 +1262,9 @@ static int mmc_select_hs200(struct mmc_card *card)
1234 1262
1235 mmc_select_driver_type(card); 1263 mmc_select_driver_type(card);
1236 1264
1265 if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
1266 send_status = false;
1267
1237 /* 1268 /*
1238 * Set the bus width(4 or 8) with host's support and 1269 * Set the bus width(4 or 8) with host's support and
1239 * switch to HS200 mode if bus width is set successfully. 1270 * switch to HS200 mode if bus width is set successfully.
@@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card)
1245 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1276 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1246 EXT_CSD_HS_TIMING, val, 1277 EXT_CSD_HS_TIMING, val,
1247 card->ext_csd.generic_cmd6_time, 1278 card->ext_csd.generic_cmd6_time,
1248 true, true, true); 1279 true, send_status, true);
1249 if (!err) 1280 if (err)
1250 mmc_set_timing(host, MMC_TIMING_MMC_HS200); 1281 goto err;
1282 old_timing = host->ios.timing;
1283 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1284 if (!send_status) {
1285 err = mmc_switch_status(card);
1286 /*
1287 * mmc_select_timing() assumes timing has not changed if
1288 * it is a switch error.
1289 */
1290 if (err == -EBADMSG)
1291 mmc_set_timing(host, old_timing);
1292 }
1251 } 1293 }
1252err: 1294err:
1295 if (err)
1296 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1297 __func__, err);
1253 return err; 1298 return err;
1254} 1299}
1255 1300
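The mmc.c rework changes the CMD13 handling around timing switches: on hosts with MMC_CAP_WAIT_WHILE_BUSY, __mmc_switch() is told not to poll status (send_status = false), the host is retimed first, and only then is the card queried via the relocated mmc_switch_status(), so the status read happens at a timing both ends agree on. The resulting ordering, condensed from the HS400 path above:

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time,
			   true, send_status, true);	/* skip CMD13 poll if host waits on busy */
	if (err)
		return err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS);	/* retime the host first */

	if (!send_status) {
		err = mmc_switch_status(card);		/* CMD13 at the new timing */
		if (err)
			return err;
	}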
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af71de5fda3b..1dee533634c9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -473,6 +473,7 @@ config MMC_DAVINCI
473 473
474config MMC_GOLDFISH 474config MMC_GOLDFISH
475 tristate "goldfish qemu Multimedia Card Interface support" 475 tristate "goldfish qemu Multimedia Card Interface support"
476 depends on HAS_DMA
476 depends on GOLDFISH || COMPILE_TEST 477 depends on GOLDFISH || COMPILE_TEST
477 help 478 help
478 This selects the Goldfish Multimedia card Interface emulation 479 This selects the Goldfish Multimedia card Interface emulation
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 39568cc29a2a..33dfd7e72516 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
1276 int start = 0, len = 0; 1276 int start = 0, len = 0;
1277 int start_final = 0, len_final = 0; 1277 int start_final = 0, len_final = 0;
1278 u8 final_phase = 0xff; 1278 u8 final_phase = 0xff;
1279 struct msdc_delay_phase delay_phase; 1279 struct msdc_delay_phase delay_phase = { 0, };
1280 1280
1281 if (delay == 0) { 1281 if (delay == 0) {
1282 dev_err(host->dev, "phase error: [map:%x]\n", delay); 1282 dev_err(host->dev, "phase error: [map:%x]\n", delay);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8cadd74e8407..ce08896b9d69 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev)
805 goto out; 805 goto out;
806 } else { 806 } else {
807 mmc->caps |= host->pdata->gpio_card_ro_invert ? 807 mmc->caps |= host->pdata->gpio_card_ro_invert ?
808 MMC_CAP2_RO_ACTIVE_HIGH : 0; 808 0 : MMC_CAP2_RO_ACTIVE_HIGH;
809 } 809 }
810 810
811 if (gpio_is_valid(gpio_cd)) 811 if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index dc4e8446f1ff..5a99a93ed025 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27 27
28#include <asm/mach-jz4740/gpio.h>
28#include <asm/mach-jz4740/jz4740_nand.h> 29#include <asm/mach-jz4740/jz4740_nand.h>
29 30
30#define JZ_REG_NAND_CTRL 0x50 31#define JZ_REG_NAND_CTRL 0x50
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index cc74142938b0..ece544efccc3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd)
3110 */ 3110 */
3111static void nand_shutdown(struct mtd_info *mtd) 3111static void nand_shutdown(struct mtd_info *mtd)
3112{ 3112{
3113 nand_get_device(mtd, FL_SHUTDOWN); 3113 nand_get_device(mtd, FL_PM_SUSPENDED);
3114} 3114}
3115 3115
3116/* Set default functions */ 3116/* Set default functions */
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 57dadd52b428..1deb8ff90a89 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
501 cf->data[2] |= CAN_ERR_PROT_FORM; 501 cf->data[2] |= CAN_ERR_PROT_FORM;
502 else if (status & SER) 502 else if (status & SER)
503 cf->data[2] |= CAN_ERR_PROT_STUFF; 503 cf->data[2] |= CAN_ERR_PROT_STUFF;
504 else
505 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
506 } 504 }
507 505
508 priv->can.state = state; 506 priv->can.state = state;
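The bfin_can hunk opens a sweep across the CAN drivers below (c_can, cc770, flexcan, janz-ican3, m_can, pch_can, rcar_can, sja1000, sun4i_can, ti_hecc, xilinx_can, and the USB adapters): in the uapi error frame, data[2] is a bitmask of protocol error types while data[3] carries a single CAN_ERR_PROT_LOC_* location code, and CAN_ERR_PROT_UNSPEC is 0, so OR-ing it in was always a no-op. The convention the fixes converge on:

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_FORM;	/* type field: bits may combine */
	cf->data[3] = CAN_ERR_PROT_LOC_ACK;	/* location field: plain assignment */
	/* no cf->data[2] |= CAN_ERR_PROT_UNSPEC: the constant is 0 */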
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5d214d135332..f91b094288da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
962 * type of the last error to occur on the CAN bus 962 * type of the last error to occur on the CAN bus
963 */ 963 */
964 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 964 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
965 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
966 965
967 switch (lec_type) { 966 switch (lec_type) {
968 case LEC_STUFF_ERROR: 967 case LEC_STUFF_ERROR:
@@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
975 break; 974 break;
976 case LEC_ACK_ERROR: 975 case LEC_ACK_ERROR:
977 netdev_dbg(dev, "ack error\n"); 976 netdev_dbg(dev, "ack error\n");
978 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 977 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
979 CAN_ERR_PROT_LOC_ACK_DEL);
980 break; 978 break;
981 case LEC_BIT1_ERROR: 979 case LEC_BIT1_ERROR:
982 netdev_dbg(dev, "bit1 error\n"); 980 netdev_dbg(dev, "bit1 error\n");
@@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
988 break; 986 break;
989 case LEC_CRC_ERROR: 987 case LEC_CRC_ERROR:
990 netdev_dbg(dev, "CRC error\n"); 988 netdev_dbg(dev, "CRC error\n");
991 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 989 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
992 CAN_ERR_PROT_LOC_CRC_DEL);
993 break; 990 break;
994 default: 991 default:
995 break; 992 break;
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 70a8cbb29e75..1e37313054f3 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status)
578 cf->data[2] |= CAN_ERR_PROT_BIT0; 578 cf->data[2] |= CAN_ERR_PROT_BIT0;
579 break; 579 break;
580 case STAT_LEC_CRC: 580 case STAT_LEC_CRC:
581 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 581 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
582 break; 582 break;
583 } 583 }
584 } 584 }
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 868fe945e35a..41c0fc9f3b14 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev,
535 if (reg_esr & FLEXCAN_ESR_ACK_ERR) { 535 if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
536 netdev_dbg(dev, "ACK_ERR irq\n"); 536 netdev_dbg(dev, "ACK_ERR irq\n");
537 cf->can_id |= CAN_ERR_ACK; 537 cf->can_id |= CAN_ERR_ACK;
538 cf->data[3] |= CAN_ERR_PROT_LOC_ACK; 538 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
539 tx_errors = 1; 539 tx_errors = 1;
540 } 540 }
541 if (reg_esr & FLEXCAN_ESR_CRC_ERR) { 541 if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
542 netdev_dbg(dev, "CRC_ERR irq\n"); 542 netdev_dbg(dev, "CRC_ERR irq\n");
543 cf->data[2] |= CAN_ERR_PROT_BIT; 543 cf->data[2] |= CAN_ERR_PROT_BIT;
544 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 544 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
545 rx_errors = 1; 545 rx_errors = 1;
546 } 546 }
547 if (reg_esr & FLEXCAN_ESR_FRM_ERR) { 547 if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c1e85368a198..5d04f5464faf 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
1096 cf->data[2] |= CAN_ERR_PROT_STUFF; 1096 cf->data[2] |= CAN_ERR_PROT_STUFF;
1097 break; 1097 break;
1098 default: 1098 default:
1099 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1100 cf->data[3] = ecc & ECC_SEG; 1099 cf->data[3] = ecc & ECC_SEG;
1101 break; 1100 break;
1102 } 1101 }
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index ef655177bb5e..39cf911f7a1e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
487 * type of the last error to occur on the CAN bus 487 * type of the last error to occur on the CAN bus
488 */ 488 */
489 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 489 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
490 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
491 490
492 switch (lec_type) { 491 switch (lec_type) {
493 case LEC_STUFF_ERROR: 492 case LEC_STUFF_ERROR:
@@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
500 break; 499 break;
501 case LEC_ACK_ERROR: 500 case LEC_ACK_ERROR:
502 netdev_dbg(dev, "ack error\n"); 501 netdev_dbg(dev, "ack error\n");
503 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 502 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
504 CAN_ERR_PROT_LOC_ACK_DEL);
505 break; 503 break;
506 case LEC_BIT1_ERROR: 504 case LEC_BIT1_ERROR:
507 netdev_dbg(dev, "bit1 error\n"); 505 netdev_dbg(dev, "bit1 error\n");
@@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
513 break; 511 break;
514 case LEC_CRC_ERROR: 512 case LEC_CRC_ERROR:
515 netdev_dbg(dev, "CRC error\n"); 513 netdev_dbg(dev, "CRC error\n");
516 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 514 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
517 CAN_ERR_PROT_LOC_CRC_DEL);
518 break; 515 break;
519 default: 516 default:
520 break; 517 break;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index e187ca783da0..c1317889d3d8 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
559 stats->rx_errors++; 559 stats->rx_errors++;
560 break; 560 break;
561 case PCH_CRC_ERR: 561 case PCH_CRC_ERR:
562 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 562 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
563 CAN_ERR_PROT_LOC_CRC_DEL;
564 priv->can.can_stats.bus_error++; 563 priv->can.can_stats.bus_error++;
565 stats->rx_errors++; 564 stats->rx_errors++;
566 break; 565 break;
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7bd54191f962..bc46be39549d 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev)
241 u8 ecsr; 241 u8 ecsr;
242 242
243 netdev_dbg(priv->ndev, "Bus error interrupt:\n"); 243 netdev_dbg(priv->ndev, "Bus error interrupt:\n");
244 if (skb) { 244 if (skb)
245 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 245 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
246 cf->data[2] = CAN_ERR_PROT_UNSPEC; 246
247 }
248 ecsr = readb(&priv->regs->ecsr); 247 ecsr = readb(&priv->regs->ecsr);
249 if (ecsr & RCAR_CAN_ECSR_ADEF) { 248 if (ecsr & RCAR_CAN_ECSR_ADEF) {
250 netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); 249 netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
251 tx_errors++; 250 tx_errors++;
252 writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); 251 writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
253 if (skb) 252 if (skb)
254 cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; 253 cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
255 } 254 }
256 if (ecsr & RCAR_CAN_ECSR_BE0F) { 255 if (ecsr & RCAR_CAN_ECSR_BE0F) {
257 netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); 256 netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
@@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev)
272 rx_errors++; 271 rx_errors++;
273 writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); 272 writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
274 if (skb) 273 if (skb)
275 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 274 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
276 } 275 }
277 if (ecsr & RCAR_CAN_ECSR_AEF) { 276 if (ecsr & RCAR_CAN_ECSR_AEF) {
278 netdev_dbg(priv->ndev, "ACK Error\n"); 277 netdev_dbg(priv->ndev, "ACK Error\n");
@@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev)
280 writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); 279 writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
281 if (skb) { 280 if (skb) {
282 cf->can_id |= CAN_ERR_ACK; 281 cf->can_id |= CAN_ERR_ACK;
283 cf->data[3] |= CAN_ERR_PROT_LOC_ACK; 282 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
284 } 283 }
285 } 284 }
286 if (ecsr & RCAR_CAN_ECSR_FEF) { 285 if (ecsr & RCAR_CAN_ECSR_FEF) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7b92e911a616..8dda3b703d39 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
218 priv->write_reg(priv, SJA1000_RXERR, 0x0); 218 priv->write_reg(priv, SJA1000_RXERR, 0x0);
219 priv->read_reg(priv, SJA1000_ECC); 219 priv->read_reg(priv, SJA1000_ECC);
220 220
221 /* clear interrupt flags */
222 priv->read_reg(priv, SJA1000_IR);
223
221 /* leave reset mode */ 224 /* leave reset mode */
222 set_normal_mode(dev); 225 set_normal_mode(dev);
223} 226}
@@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
446 cf->data[2] |= CAN_ERR_PROT_STUFF; 449 cf->data[2] |= CAN_ERR_PROT_STUFF;
447 break; 450 break;
448 default: 451 default:
449 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
450 cf->data[3] = ecc & ECC_SEG; 452 cf->data[3] = ecc & ECC_SEG;
451 break; 453 break;
452 } 454 }
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index d9a42c646783..68ef0a4cd821 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
575 cf->data[2] |= CAN_ERR_PROT_STUFF; 575 cf->data[2] |= CAN_ERR_PROT_STUFF;
576 break; 576 break;
577 default: 577 default:
578 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
579 cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) 578 cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE)
580 >> 16; 579 >> 16;
581 break; 580 break;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index cf345cbfe819..680d1ff07a55 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
722 if (err_status & HECC_BUS_ERROR) { 722 if (err_status & HECC_BUS_ERROR) {
723 ++priv->can.can_stats.bus_error; 723 ++priv->can.can_stats.bus_error;
724 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 724 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
725 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
726 if (err_status & HECC_CANES_FE) { 725 if (err_status & HECC_CANES_FE) {
727 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); 726 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
728 cf->data[2] |= CAN_ERR_PROT_FORM; 727 cf->data[2] |= CAN_ERR_PROT_FORM;
@@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
737 } 736 }
738 if (err_status & HECC_CANES_CRCE) { 737 if (err_status & HECC_CANES_CRCE) {
739 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); 738 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
740 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 739 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
741 CAN_ERR_PROT_LOC_CRC_DEL;
742 } 740 }
743 if (err_status & HECC_CANES_ACKE) { 741 if (err_status & HECC_CANES_ACKE) {
744 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); 742 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
745 cf->data[3] |= CAN_ERR_PROT_LOC_ACK | 743 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
746 CAN_ERR_PROT_LOC_ACK_DEL;
747 } 744 }
748 } 745 }
749 746
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2d390384ef3b..fc5b75675cd8 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
377 cf->data[2] |= CAN_ERR_PROT_STUFF; 377 cf->data[2] |= CAN_ERR_PROT_STUFF;
378 break; 378 break;
379 default: 379 default:
380 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
381 cf->data[3] = ecc & SJA1000_ECC_SEG; 380 cf->data[3] = ecc & SJA1000_ECC_SEG;
382 break; 381 break;
383 } 382 }
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 0e5a4493ba4f..113e64fcd73b 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
282 cf->data[2] |= CAN_ERR_PROT_STUFF; 282 cf->data[2] |= CAN_ERR_PROT_STUFF;
283 break; 283 break;
284 default: 284 default:
285 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
286 cf->data[3] = ecc & SJA1000_ECC_SEG; 285 cf->data[3] = ecc & SJA1000_ECC_SEG;
287 break; 286 break;
288 } 287 }
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 8b17a9065b0b..022bfa13ebfa 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
944 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 944 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
945 945
946 if (es->leaf.error_factor & M16C_EF_ACKE) 946 if (es->leaf.error_factor & M16C_EF_ACKE)
947 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK); 947 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
948 if (es->leaf.error_factor & M16C_EF_CRCE) 948 if (es->leaf.error_factor & M16C_EF_CRCE)
949 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 949 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
950 CAN_ERR_PROT_LOC_CRC_DEL);
951 if (es->leaf.error_factor & M16C_EF_FORME) 950 if (es->leaf.error_factor & M16C_EF_FORME)
952 cf->data[2] |= CAN_ERR_PROT_FORM; 951 cf->data[2] |= CAN_ERR_PROT_FORM;
953 if (es->leaf.error_factor & M16C_EF_STFE) 952 if (es->leaf.error_factor & M16C_EF_STFE)
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index de95b1ccba3e..a731720f1d13 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
401 tx_errors = 1; 401 tx_errors = 1;
402 break; 402 break;
403 case USB_8DEV_STATUSMSG_CRC: 403 case USB_8DEV_STATUSMSG_CRC:
404 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 404 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
405 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
406 CAN_ERR_PROT_LOC_CRC_DEL;
407 rx_errors = 1; 405 rx_errors = 1;
408 break; 406 break;
409 case USB_8DEV_STATUSMSG_BIT0: 407 case USB_8DEV_STATUSMSG_BIT0:
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fc55e8e0351d..51670b322409 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
608 608
609 /* Check for error interrupt */ 609 /* Check for error interrupt */
610 if (isr & XCAN_IXR_ERROR_MASK) { 610 if (isr & XCAN_IXR_ERROR_MASK) {
611 if (skb) { 611 if (skb)
612 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 612 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
613 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
614 }
615 613
616 /* Check for Ack error interrupt */ 614 /* Check for Ack error interrupt */
617 if (err_status & XCAN_ESR_ACKER_MASK) { 615 if (err_status & XCAN_ESR_ACKER_MASK) {
618 stats->tx_errors++; 616 stats->tx_errors++;
619 if (skb) { 617 if (skb) {
620 cf->can_id |= CAN_ERR_ACK; 618 cf->can_id |= CAN_ERR_ACK;
621 cf->data[3] |= CAN_ERR_PROT_LOC_ACK; 619 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
622 } 620 }
623 } 621 }
624 622
@@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
654 stats->rx_errors++; 652 stats->rx_errors++;
655 if (skb) { 653 if (skb) {
656 cf->can_id |= CAN_ERR_PROT; 654 cf->can_id |= CAN_ERR_PROT;
657 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ | 655 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
658 CAN_ERR_PROT_LOC_CRC_DEL;
659 } 656 }
660 } 657 }
661 priv->can.can_stats.bus_error++; 658 priv->can.can_stats.bus_error++;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 9093577755f6..0527f485c3dc 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -15,9 +15,7 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/phy.h> 16#include <linux/phy.h>
17#include <net/dsa.h> 17#include <net/dsa.h>
18 18#include "mv88e6060.h"
19#define REG_PORT(p) (8 + (p))
20#define REG_GLOBAL 0x0f
21 19
22static int reg_read(struct dsa_switch *ds, int addr, int reg) 20static int reg_read(struct dsa_switch *ds, int addr, int reg)
23{ 21{
@@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
67 if (bus == NULL) 65 if (bus == NULL)
68 return NULL; 66 return NULL;
69 67
70 ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); 68 ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
71 if (ret >= 0) { 69 if (ret >= 0) {
72 if (ret == 0x0600) 70 if (ret == PORT_SWITCH_ID_6060)
73 return "Marvell 88E6060 (A0)"; 71 return "Marvell 88E6060 (A0)";
74 if (ret == 0x0601 || ret == 0x0602) 72 if (ret == PORT_SWITCH_ID_6060_R1 ||
73 ret == PORT_SWITCH_ID_6060_R2)
75 return "Marvell 88E6060 (B0)"; 74 return "Marvell 88E6060 (B0)";
76 if ((ret & 0xfff0) == 0x0600) 75 if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
77 return "Marvell 88E6060"; 76 return "Marvell 88E6060";
78 } 77 }
79 78
@@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
87 unsigned long timeout; 86 unsigned long timeout;
88 87
89 /* Set all ports to the disabled state. */ 88 /* Set all ports to the disabled state. */
90 for (i = 0; i < 6; i++) { 89 for (i = 0; i < MV88E6060_PORTS; i++) {
91 ret = REG_READ(REG_PORT(i), 0x04); 90 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
92 REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); 91 REG_WRITE(REG_PORT(i), PORT_CONTROL,
92 ret & ~PORT_CONTROL_STATE_MASK);
93 } 93 }
94 94
95 /* Wait for transmit queues to drain. */ 95 /* Wait for transmit queues to drain. */
96 usleep_range(2000, 4000); 96 usleep_range(2000, 4000);
97 97
98 /* Reset the switch. */ 98 /* Reset the switch. */
99 REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); 99 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
100 GLOBAL_ATU_CONTROL_SWRESET |
101 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
102 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
100 103
101 /* Wait up to one second for reset to complete. */ 104 /* Wait up to one second for reset to complete. */
102 timeout = jiffies + 1 * HZ; 105 timeout = jiffies + 1 * HZ;
103 while (time_before(jiffies, timeout)) { 106 while (time_before(jiffies, timeout)) {
104 ret = REG_READ(REG_GLOBAL, 0x00); 107 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
105 if ((ret & 0x8000) == 0x0000) 108 if (ret & GLOBAL_STATUS_INIT_READY)
106 break; 109 break;
107 110
108 usleep_range(1000, 2000); 111 usleep_range(1000, 2000);
@@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
119 * set the maximum frame size to 1536 bytes, and mask all 122 * set the maximum frame size to 1536 bytes, and mask all
120 * interrupt sources. 123 * interrupt sources.
121 */ 124 */
122 REG_WRITE(REG_GLOBAL, 0x04, 0x0800); 125 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
123 126
124 /* Enable automatic address learning, set the address 127 /* Enable automatic address learning, set the address
125 * database size to 1024 entries, and set the default aging 128 * database size to 1024 entries, and set the default aging
126 * time to 5 minutes. 129 * time to 5 minutes.
127 */ 130 */
128 REG_WRITE(REG_GLOBAL, 0x0a, 0x2130); 131 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
132 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
133 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
129 134
130 return 0; 135 return 0;
131} 136}
@@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
139 * state to Forwarding. Additionally, if this is the CPU 144 * state to Forwarding. Additionally, if this is the CPU
140 * port, enable Ingress and Egress Trailer tagging mode. 145 * port, enable Ingress and Egress Trailer tagging mode.
141 */ 146 */
142 REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003); 147 REG_WRITE(addr, PORT_CONTROL,
148 dsa_is_cpu_port(ds, p) ?
149 PORT_CONTROL_TRAILER |
150 PORT_CONTROL_INGRESS_MODE |
151 PORT_CONTROL_STATE_FORWARDING :
152 PORT_CONTROL_STATE_FORWARDING);
143 153
144 /* Port based VLAN map: give each port its own address 154 /* Port based VLAN map: give each port its own address
145 * database, allow the CPU port to talk to each of the 'real' 155 * database, allow the CPU port to talk to each of the 'real'
146 * ports, and allow each of the 'real' ports to only talk to 156 * ports, and allow each of the 'real' ports to only talk to
147 * the CPU port. 157 * the CPU port.
148 */ 158 */
149 REG_WRITE(addr, 0x06, 159 REG_WRITE(addr, PORT_VLAN_MAP,
150 ((p & 0xf) << 12) | 160 ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
151 (dsa_is_cpu_port(ds, p) ? 161 (dsa_is_cpu_port(ds, p) ?
152 ds->phys_port_mask : 162 ds->phys_port_mask :
153 (1 << ds->dst->cpu_port))); 163 BIT(ds->dst->cpu_port)));
154 164
155 /* Port Association Vector: when learning source addresses 165 /* Port Association Vector: when learning source addresses
156 * of packets, add the address to the address database using 166 * of packets, add the address to the address database using
157 * a port bitmap that has only the bit for this port set and 167 * a port bitmap that has only the bit for this port set and
158 * the other bits clear. 168 * the other bits clear.
159 */ 169 */
160 REG_WRITE(addr, 0x0b, 1 << p); 170 REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
161 171
162 return 0; 172 return 0;
163} 173}
@@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds)
177 if (ret < 0) 187 if (ret < 0)
178 return ret; 188 return ret;
179 189
180 for (i = 0; i < 6; i++) { 190 for (i = 0; i < MV88E6060_PORTS; i++) {
181 ret = mv88e6060_setup_port(ds, i); 191 ret = mv88e6060_setup_port(ds, i);
182 if (ret < 0) 192 if (ret < 0)
183 return ret; 193 return ret;
@@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds)
188 198
189static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) 199static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
190{ 200{
191 REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); 201 /* Use the same MAC Address as FD Pause frames for all ports */
192 REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); 202 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
193 REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); 203 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
204 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
194 205
195 return 0; 206 return 0;
196} 207}
197 208
198static int mv88e6060_port_to_phy_addr(int port) 209static int mv88e6060_port_to_phy_addr(int port)
199{ 210{
200 if (port >= 0 && port <= 5) 211 if (port >= 0 && port < MV88E6060_PORTS)
201 return port; 212 return port;
202 return -1; 213 return -1;
203} 214}
@@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
225 return reg_write(ds, addr, regnum, val); 236 return reg_write(ds, addr, regnum, val);
226} 237}
227 238
228static void mv88e6060_poll_link(struct dsa_switch *ds)
229{
230 int i;
231
232 for (i = 0; i < DSA_MAX_PORTS; i++) {
233 struct net_device *dev;
234 int uninitialized_var(port_status);
235 int link;
236 int speed;
237 int duplex;
238 int fc;
239
240 dev = ds->ports[i];
241 if (dev == NULL)
242 continue;
243
244 link = 0;
245 if (dev->flags & IFF_UP) {
246 port_status = reg_read(ds, REG_PORT(i), 0x00);
247 if (port_status < 0)
248 continue;
249
250 link = !!(port_status & 0x1000);
251 }
252
253 if (!link) {
254 if (netif_carrier_ok(dev)) {
255 netdev_info(dev, "link down\n");
256 netif_carrier_off(dev);
257 }
258 continue;
259 }
260
261 speed = (port_status & 0x0100) ? 100 : 10;
262 duplex = (port_status & 0x0200) ? 1 : 0;
263 fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
264
265 if (!netif_carrier_ok(dev)) {
266 netdev_info(dev,
267 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
268 speed,
269 duplex ? "full" : "half",
270 fc ? "en" : "dis");
271 netif_carrier_on(dev);
272 }
273 }
274}
275
276static struct dsa_switch_driver mv88e6060_switch_driver = { 239static struct dsa_switch_driver mv88e6060_switch_driver = {
277 .tag_protocol = DSA_TAG_PROTO_TRAILER, 240 .tag_protocol = DSA_TAG_PROTO_TRAILER,
278 .probe = mv88e6060_probe, 241 .probe = mv88e6060_probe,
@@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
280 .set_addr = mv88e6060_set_addr, 243 .set_addr = mv88e6060_set_addr,
281 .phy_read = mv88e6060_phy_read, 244 .phy_read = mv88e6060_phy_read,
282 .phy_write = mv88e6060_phy_write, 245 .phy_write = mv88e6060_phy_write,
283 .poll_link = mv88e6060_poll_link,
284}; 246};
285 247
286static int __init mv88e6060_init(void) 248static int __init mv88e6060_init(void)
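The mv88e6060.c rewrite trades magic numbers for the named register fields added in the new mv88e6060.h below; poll_link is also dropped, presumably leaving link management to the generic layer. Same write, different readability, taking the ATU setup as an example:

	/* before: intent hidden in the constant */
	REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);

	/* after: address-table size and ageing time are legible */
	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);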
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 000000000000..cc9b2ed4aff4
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,111 @@
1/*
2 * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
3 * Copyright (c) 2015 Neil Armstrong
4 *
5 * Based on mv88e6xxx.h
6 * Copyright (c) 2008 Marvell Semiconductor
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __MV88E6060_H
15#define __MV88E6060_H
16
17#define MV88E6060_PORTS 6
18
19#define REG_PORT(p) (0x8 + (p))
20#define PORT_STATUS 0x00
21#define PORT_STATUS_PAUSE_EN BIT(15)
22#define PORT_STATUS_MY_PAUSE BIT(14)
23#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
24#define PORT_STATUS_RESOLVED BIT(13)
25#define PORT_STATUS_LINK BIT(12)
26#define PORT_STATUS_PORTMODE BIT(11)
27#define PORT_STATUS_PHYMODE BIT(10)
28#define PORT_STATUS_DUPLEX BIT(9)
29#define PORT_STATUS_SPEED BIT(8)
30#define PORT_SWITCH_ID 0x03
31#define PORT_SWITCH_ID_6060 0x0600
32#define PORT_SWITCH_ID_6060_MASK 0xfff0
33#define PORT_SWITCH_ID_6060_R1 0x0601
34#define PORT_SWITCH_ID_6060_R2 0x0602
35#define PORT_CONTROL 0x04
36#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15)
37#define PORT_CONTROL_TRAILER BIT(14)
38#define PORT_CONTROL_HEADER BIT(11)
39#define PORT_CONTROL_INGRESS_MODE BIT(8)
40#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
41#define PORT_CONTROL_STATE_MASK 0x03
42#define PORT_CONTROL_STATE_DISABLED 0x00
43#define PORT_CONTROL_STATE_BLOCKING 0x01
44#define PORT_CONTROL_STATE_LEARNING 0x02
45#define PORT_CONTROL_STATE_FORWARDING 0x03
46#define PORT_VLAN_MAP 0x06
47#define PORT_VLAN_MAP_DBNUM_SHIFT 12
48#define PORT_VLAN_MAP_TABLE_MASK 0x1f
49#define PORT_ASSOC_VECTOR 0x0b
50#define PORT_ASSOC_VECTOR_MONITOR BIT(15)
51#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f
52#define PORT_RX_CNTR 0x10
53#define PORT_TX_CNTR 0x11
54
55#define REG_GLOBAL 0x0f
56#define GLOBAL_STATUS 0x00
57#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12)
58#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12)
59#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12)
60#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12)
61#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12)
62#define GLOBAL_STATUS_INIT_READY BIT(11)
63#define GLOBAL_STATUS_ATU_FULL BIT(3)
64#define GLOBAL_STATUS_ATU_DONE BIT(2)
65#define GLOBAL_STATUS_PHY_INT BIT(1)
66#define GLOBAL_STATUS_EEINT BIT(0)
67#define GLOBAL_MAC_01 0x01
68#define GLOBAL_MAC_01_DIFF_ADDR BIT(8)
69#define GLOBAL_MAC_23 0x02
70#define GLOBAL_MAC_45 0x03
71#define GLOBAL_CONTROL 0x04
72#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13)
73#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10)
74#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9)
75#define GLOBAL_CONTROL_CTRMODE BIT(8)
76#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3)
77#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
78#define GLOBAL_CONTROL_PHYINT_EN BIT(1)
79#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
80#define GLOBAL_ATU_CONTROL 0x0a
81#define GLOBAL_ATU_CONTROL_SWRESET BIT(15)
82#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14)
83#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12)
84#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12)
85#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12)
86#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4
87#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4)
88#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4)
89#define GLOBAL_ATU_OP 0x0b
90#define GLOBAL_ATU_OP_BUSY BIT(15)
91#define GLOBAL_ATU_OP_NOP (0 << 12)
92#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
93#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY)
94#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
95#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
96#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
97#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
98#define GLOBAL_ATU_DATA 0x0c
99#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0
100#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
101#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
102#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
103#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
104#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f
105#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
106#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e
107#define GLOBAL_ATU_MAC_01 0x0d
108#define GLOBAL_ATU_MAC_23 0x0e
109#define GLOBAL_ATU_MAC_45 0x0f
110
111#endif
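
These defines replace the raw register numbers used throughout the driver; building a register value from them is ordinary bit arithmetic. A small stand-alone sketch (BIT() is redefined locally so the example compiles outside the kernel; the port and database numbers are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define PORT_VLAN_MAP_DBNUM_SHIFT	12
#define PORT_VLAN_MAP_TABLE_MASK	0x1f

int main(void)
{
	int p = 2;				/* example port */
	uint16_t pav = BIT(p);			/* PORT_ASSOC_VECTOR: only this port's bit */
	uint16_t vlan = (1 << PORT_VLAN_MAP_DBNUM_SHIFT) |	/* database 1 */
			(0x1e & PORT_VLAN_MAP_TABLE_MASK);	/* egress port mask */

	printf("PORT_ASSOC_VECTOR=%#x PORT_VLAN_MAP=%#x\n", pav, vlan);
	return 0;
}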
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa7597dab9..31c5e476fd64 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig"
29source "drivers/net/ethernet/apple/Kconfig" 29source "drivers/net/ethernet/apple/Kconfig"
30source "drivers/net/ethernet/arc/Kconfig" 30source "drivers/net/ethernet/arc/Kconfig"
31source "drivers/net/ethernet/atheros/Kconfig" 31source "drivers/net/ethernet/atheros/Kconfig"
32source "drivers/net/ethernet/aurora/Kconfig"
32source "drivers/net/ethernet/cadence/Kconfig" 33source "drivers/net/ethernet/cadence/Kconfig"
33source "drivers/net/ethernet/adi/Kconfig" 34source "drivers/net/ethernet/adi/Kconfig"
34source "drivers/net/ethernet/broadcom/Kconfig" 35source "drivers/net/ethernet/broadcom/Kconfig"
@@ -78,7 +79,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
78source "drivers/net/ethernet/intel/Kconfig" 79source "drivers/net/ethernet/intel/Kconfig"
79source "drivers/net/ethernet/i825xx/Kconfig" 80source "drivers/net/ethernet/i825xx/Kconfig"
80source "drivers/net/ethernet/xscale/Kconfig" 81source "drivers/net/ethernet/xscale/Kconfig"
81source "drivers/net/ethernet/icplus/Kconfig"
82 82
83config JME 83config JME
84 tristate "JMicron(R) PCI-Express Gigabit Ethernet support" 84 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808110a1..071f84eb6f3f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
15obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ 15obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
16obj-$(CONFIG_NET_VENDOR_ARC) += arc/ 16obj-$(CONFIG_NET_VENDOR_ARC) += arc/
17obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ 17obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
18obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
18obj-$(CONFIG_NET_CADENCE) += cadence/ 19obj-$(CONFIG_NET_CADENCE) += cadence/
19obj-$(CONFIG_NET_BFIN) += adi/ 20obj-$(CONFIG_NET_BFIN) += adi/
20obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ 21obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
@@ -41,7 +42,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
41obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ 42obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
42obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ 43obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
43obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ 44obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
44obj-$(CONFIG_IP1000) += icplus/
45obj-$(CONFIG_JME) += jme.o 45obj-$(CONFIG_JME) += jme.o
46obj-$(CONFIG_KORINA) += korina.o 46obj-$(CONFIG_KORINA) += korina.o
47obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o 47obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2afabf3a465..7ccebae9cb48 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1500 return -ENODEV; 1500 return -ENODEV;
1501 } 1501 }
1502 1502
1503	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {	1503	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
1504 if (err) {
1504 if (pcnet32_debug & NETIF_MSG_PROBE) 1505 if (pcnet32_debug & NETIF_MSG_PROBE)
1505 pr_err("architecture does not support 32bit PCI busmaster DMA\n"); 1506 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1506 return -ENODEV; 1507 return err;
1507 } 1508 }
1508 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1509 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1509 if (pcnet32_debug & NETIF_MSG_PROBE) 1510 if (pcnet32_debug & NETIF_MSG_PROBE)
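
The pcnet32 change swaps the removed pci_dma_supported() capability test for pci_set_dma_mask(), which actually sets the mask and returns a negative errno that can be forwarded as-is instead of a synthesized -ENODEV. A runnable sketch of that propagation pattern (set_dma_mask() here is a stand-in that mimics the 0-or-negative-errno convention, not the real PCI API):

#include <errno.h>
#include <stdio.h>

/* Stand-in mimicking pci_set_dma_mask(): 0 on success, -EIO on failure. */
static int set_dma_mask(unsigned long long mask)
{
	return mask >= 0xffffffffULL ? 0 : -EIO;
}

int main(void)
{
	int err = set_dma_mask(0xffffffffULL);

	if (err) {
		fprintf(stderr, "no usable DMA configuration\n");
		return -err;	/* forward the errno instead of a made-up code */
	}
	return 0;
}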
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 991412ce6f48..9147a0107c44 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -450,12 +450,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
450 return NETDEV_TX_OK; 450 return NETDEV_TX_OK;
451 } 451 }
452 452
453 pdata->ring_ops->wr_cmd(tx_ring, count);
454 skb_tx_timestamp(skb); 453 skb_tx_timestamp(skb);
455 454
456 pdata->stats.tx_packets++; 455 pdata->stats.tx_packets++;
457 pdata->stats.tx_bytes += skb->len; 456 pdata->stats.tx_bytes += skb->len;
458 457
458 pdata->ring_ops->wr_cmd(tx_ring, count);
459 return NETDEV_TX_OK; 459 return NETDEV_TX_OK;
460} 460}
461 461
@@ -688,10 +688,10 @@ static int xgene_enet_open(struct net_device *ndev)
688 mac_ops->tx_enable(pdata); 688 mac_ops->tx_enable(pdata);
689 mac_ops->rx_enable(pdata); 689 mac_ops->rx_enable(pdata);
690 690
691 xgene_enet_napi_enable(pdata);
691 ret = xgene_enet_register_irq(ndev); 692 ret = xgene_enet_register_irq(ndev);
692 if (ret) 693 if (ret)
693 return ret; 694 return ret;
694 xgene_enet_napi_enable(pdata);
695 695
696 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 696 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
697 phy_start(pdata->phy_dev); 697 phy_start(pdata->phy_dev);
@@ -715,13 +715,13 @@ static int xgene_enet_close(struct net_device *ndev)
715 else 715 else
716 cancel_delayed_work_sync(&pdata->link_work); 716 cancel_delayed_work_sync(&pdata->link_work);
717 717
718 xgene_enet_napi_disable(pdata);
719 xgene_enet_free_irq(ndev);
720 xgene_enet_process_ring(pdata->rx_ring, -1);
721
722 mac_ops->tx_disable(pdata); 718 mac_ops->tx_disable(pdata);
723 mac_ops->rx_disable(pdata); 719 mac_ops->rx_disable(pdata);
724 720
721 xgene_enet_free_irq(ndev);
722 xgene_enet_napi_disable(pdata);
723 xgene_enet_process_ring(pdata->rx_ring, -1);
724
725 return 0; 725 return 0;
726} 726}
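
Both xgene reorderings follow the same rule: NAPI must be enabled before the IRQ that schedules it is requested, and on close the MAC is silenced before the IRQ is freed and NAPI is disabled, with ring draining last. Schematic of the ordering only; the foo_* names are invented stand-ins, not the xgene API:

/* open: */
foo_mac_enable(pdata);		/* hardware may now raise events        */
foo_napi_enable(pdata);		/* poller must exist before IRQs fire   */
foo_request_irq(ndev);		/* interrupts armed last                */

/* close: */
foo_mac_disable(pdata);		/* stop event sources first             */
foo_free_irq(ndev);		/* then no interrupt can schedule NAPI  */
foo_napi_disable(pdata);	/* poller quiesced                      */
foo_drain_rings(pdata);		/* finally reclaim in-flight buffers    */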
727 727
@@ -1474,15 +1474,15 @@ static int xgene_enet_probe(struct platform_device *pdev)
1474 } 1474 }
1475 ndev->hw_features = ndev->features; 1475 ndev->hw_features = ndev->features;
1476 1476
1477 ret = register_netdev(ndev); 1477 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1478 if (ret) { 1478 if (ret) {
1479 netdev_err(ndev, "Failed to register netdev\n"); 1479 netdev_err(ndev, "No usable DMA configuration\n");
1480 goto err; 1480 goto err;
1481 } 1481 }
1482 1482
1483 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); 1483 ret = register_netdev(ndev);
1484 if (ret) { 1484 if (ret) {
1485 netdev_err(ndev, "No usable DMA configuration\n"); 1485 netdev_err(ndev, "Failed to register netdev\n");
1486 goto err; 1486 goto err;
1487 } 1487 }
1488 1488
@@ -1490,14 +1490,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
1490 if (ret) 1490 if (ret)
1491 goto err; 1491 goto err;
1492 1492
1493 xgene_enet_napi_add(pdata);
1494 mac_ops = pdata->mac_ops; 1493 mac_ops = pdata->mac_ops;
1495 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 1494 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
1496 ret = xgene_enet_mdio_config(pdata); 1495 ret = xgene_enet_mdio_config(pdata);
1497 else 1496 if (ret)
1497 goto err;
1498 } else {
1498 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); 1499 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
1500 }
1499 1501
1500 return ret; 1502 xgene_enet_napi_add(pdata);
1503 return 0;
1501err: 1504err:
1502 unregister_netdev(ndev); 1505 unregister_netdev(ndev);
1503 free_netdev(ndev); 1506 free_netdev(ndev);
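
The probe hunk enforces the rule that a net_device must be fully usable before register_netdev() publishes it, since ndo_open can run the instant registration returns; the DMA mask therefore has to be set first. Condensed sketch of the rule (illustrative fragment, not a complete probe):

	/* Everything ndo_open depends on must precede register_netdev(). */
	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	/* ... remaining hardware/MDIO setup ... */

	ret = register_netdev(ndev);	/* the device is live after this */
	if (ret)
		goto err;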
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8af3ce3ea38..bd377a6b067d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
1534 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1534 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1535 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), 1535 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
1536 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1536 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1537 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
1538 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1537 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), 1539 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
1538 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1540 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1539 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, 1541 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index af006b44b2a6..0959e6824cb6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -37,6 +37,7 @@
37 37
38#define ALX_DEV_ID_AR8161 0x1091 38#define ALX_DEV_ID_AR8161 0x1091
39#define ALX_DEV_ID_E2200 0xe091 39#define ALX_DEV_ID_E2200 0xe091
40#define ALX_DEV_ID_E2400 0xe0a1
40#define ALX_DEV_ID_AR8162 0x1090 41#define ALX_DEV_ID_AR8162 0x1090
41#define ALX_DEV_ID_AR8171 0x10A1 42#define ALX_DEV_ID_AR8171 0x10A1
42#define ALX_DEV_ID_AR8172 0x10A0 43#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
new file mode 100644
index 000000000000..a3c7106fdf85
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -0,0 +1,20 @@
1config NET_VENDOR_AURORA
2 bool "Aurora VLSI devices"
3 help
4 If you have a network (Ethernet) device belonging to this class,
5 say Y.
6
7 Note that the answer to this question doesn't directly affect the
8 kernel: saying N will just cause the configurator to skip all
9 questions about Aurora devices. If you say Y, you will be asked
10 for your specific device in the following questions.
11
12if NET_VENDOR_AURORA
13
14config AURORA_NB8800
15 tristate "Aurora AU-NB8800 support"
16 select PHYLIB
17 help
18 Support for the AU-NB8800 gigabit Ethernet controller.
19
20endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
new file mode 100644
index 000000000000..6cb528a2fc26
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
new file mode 100644
index 000000000000..ecc4a334c507
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -0,0 +1,1552 @@
1/*
2 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
3 *
4 * Mostly rewritten, based on driver from Sigma Designs. Original
5 * copyright notice below.
6 *
7 *
8 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
9 *
10 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/module.h>
24#include <linux/etherdevice.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/interrupt.h>
28#include <linux/platform_device.h>
29#include <linux/of_device.h>
30#include <linux/of_mdio.h>
31#include <linux/of_net.h>
32#include <linux/dma-mapping.h>
33#include <linux/phy.h>
34#include <linux/cache.h>
35#include <linux/jiffies.h>
36#include <linux/io.h>
37#include <linux/iopoll.h>
38#include <asm/barrier.h>
39
40#include "nb8800.h"
41
42static void nb8800_tx_done(struct net_device *dev);
43static int nb8800_dma_stop(struct net_device *dev);
44
45static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
46{
47 return readb_relaxed(priv->base + reg);
48}
49
50static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
51{
52 return readl_relaxed(priv->base + reg);
53}
54
55static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
56{
57 writeb_relaxed(val, priv->base + reg);
58}
59
60static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
61{
62 writew_relaxed(val, priv->base + reg);
63}
64
65static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
66{
67 writel_relaxed(val, priv->base + reg);
68}
69
70static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
71 u32 mask, u32 val)
72{
73 u32 old = nb8800_readb(priv, reg);
74 u32 new = (old & ~mask) | (val & mask);
75
76 if (new != old)
77 nb8800_writeb(priv, reg, new);
78}
79
80static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
81 u32 mask, u32 val)
82{
83 u32 old = nb8800_readl(priv, reg);
84 u32 new = (old & ~mask) | (val & mask);
85
86 if (new != old)
87 nb8800_writel(priv, reg, new);
88}
89
90static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
91 bool set)
92{
93 nb8800_maskb(priv, reg, bits, set ? bits : 0);
94}
95
96static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
97{
98 nb8800_maskb(priv, reg, bits, bits);
99}
100
101static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
102{
103 nb8800_maskb(priv, reg, bits, 0);
104}
105
106static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
107 bool set)
108{
109 nb8800_maskl(priv, reg, bits, set ? bits : 0);
110}
111
112static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
113{
114 nb8800_maskl(priv, reg, bits, bits);
115}
116
117static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
118{
119 nb8800_maskl(priv, reg, bits, 0);
120}
121
122static int nb8800_mdio_wait(struct mii_bus *bus)
123{
124 struct nb8800_priv *priv = bus->priv;
125 u32 val;
126
127 return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
128 val, !(val & MDIO_CMD_GO), 1, 1000);
129}
130
131static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
132{
133 struct nb8800_priv *priv = bus->priv;
134 int err;
135
136 err = nb8800_mdio_wait(bus);
137 if (err)
138 return err;
139
140 nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
141 udelay(10);
142 nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
143
144 return nb8800_mdio_wait(bus);
145}
146
147static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
148{
149 struct nb8800_priv *priv = bus->priv;
150 u32 val;
151 int err;
152
153 err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
154 if (err)
155 return err;
156
157 val = nb8800_readl(priv, NB8800_MDIO_STS);
158 if (val & MDIO_STS_ERR)
159 return 0xffff;
160
161 return val & 0xffff;
162}
163
164static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
165{
166 u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
167 MDIO_CMD_DATA(val) | MDIO_CMD_WR;
168
169 return nb8800_mdio_cmd(bus, cmd);
170}
171
172static void nb8800_mac_tx(struct net_device *dev, bool enable)
173{
174 struct nb8800_priv *priv = netdev_priv(dev);
175
176 while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
177 cpu_relax();
178
179 nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
180}
181
182static void nb8800_mac_rx(struct net_device *dev, bool enable)
183{
184 nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
185}
186
187static void nb8800_mac_af(struct net_device *dev, bool enable)
188{
189 nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
190}
191
192static void nb8800_start_rx(struct net_device *dev)
193{
194 nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
195}
196
197static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
198{
199 struct nb8800_priv *priv = netdev_priv(dev);
200 struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
201 struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
202 int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
203 dma_addr_t dma_addr;
204 struct page *page;
205 unsigned long offset;
206 void *data;
207
208 data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
209 if (!data)
210 return -ENOMEM;
211
212 page = virt_to_head_page(data);
213 offset = data - page_address(page);
214
215 dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
216 DMA_FROM_DEVICE);
217
218 if (dma_mapping_error(&dev->dev, dma_addr)) {
219 skb_free_frag(data);
220 return -ENOMEM;
221 }
222
223 rxb->page = page;
224 rxb->offset = offset;
225 rxd->desc.s_addr = dma_addr;
226
227 return 0;
228}
229
230static void nb8800_receive(struct net_device *dev, unsigned int i,
231 unsigned int len)
232{
233 struct nb8800_priv *priv = netdev_priv(dev);
234 struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
235 struct page *page = priv->rx_bufs[i].page;
236 int offset = priv->rx_bufs[i].offset;
237 void *data = page_address(page) + offset;
238 dma_addr_t dma = rxd->desc.s_addr;
239 struct sk_buff *skb;
240 unsigned int size;
241 int err;
242
243 size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
244
245 skb = napi_alloc_skb(&priv->napi, size);
246 if (!skb) {
247 netdev_err(dev, "rx skb allocation failed\n");
248 dev->stats.rx_dropped++;
249 return;
250 }
251
252 if (len <= RX_COPYBREAK) {
253 dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
254 memcpy(skb_put(skb, len), data, len);
255 dma_sync_single_for_device(&dev->dev, dma, len,
256 DMA_FROM_DEVICE);
257 } else {
258 err = nb8800_alloc_rx(dev, i, true);
259 if (err) {
260 netdev_err(dev, "rx buffer allocation failed\n");
261 dev->stats.rx_dropped++;
262 return;
263 }
264
265 dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
266 memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
267 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
268 offset + RX_COPYHDR, len - RX_COPYHDR,
269 RX_BUF_SIZE);
270 }
271
272 skb->protocol = eth_type_trans(skb, dev);
273 napi_gro_receive(&priv->napi, skb);
274}
275
276static void nb8800_rx_error(struct net_device *dev, u32 report)
277{
278 if (report & RX_LENGTH_ERR)
279 dev->stats.rx_length_errors++;
280
281 if (report & RX_FCS_ERR)
282 dev->stats.rx_crc_errors++;
283
284 if (report & RX_FIFO_OVERRUN)
285 dev->stats.rx_fifo_errors++;
286
287 if (report & RX_ALIGNMENT_ERROR)
288 dev->stats.rx_frame_errors++;
289
290 dev->stats.rx_errors++;
291}
292
293static int nb8800_poll(struct napi_struct *napi, int budget)
294{
295 struct net_device *dev = napi->dev;
296 struct nb8800_priv *priv = netdev_priv(dev);
297 struct nb8800_rx_desc *rxd;
298 unsigned int last = priv->rx_eoc;
299 unsigned int next;
300 int work = 0;
301
302 nb8800_tx_done(dev);
303
304again:
305 while (work < budget) {
306 struct nb8800_rx_buf *rxb;
307 unsigned int len;
308
309 next = (last + 1) % RX_DESC_COUNT;
310
311 rxb = &priv->rx_bufs[next];
312 rxd = &priv->rx_descs[next];
313
314 if (!rxd->report)
315 break;
316
317 len = RX_BYTES_TRANSFERRED(rxd->report);
318
319 if (IS_RX_ERROR(rxd->report))
320 nb8800_rx_error(dev, rxd->report);
321 else
322 nb8800_receive(dev, next, len);
323
324 dev->stats.rx_packets++;
325 dev->stats.rx_bytes += len;
326
327 if (rxd->report & RX_MULTICAST_PKT)
328 dev->stats.multicast++;
329
330 rxd->report = 0;
331 last = next;
332 work++;
333 }
334
335 if (work) {
336 priv->rx_descs[last].desc.config |= DESC_EOC;
337 wmb(); /* ensure new EOC is written before clearing old */
338 priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
339 priv->rx_eoc = last;
340 nb8800_start_rx(dev);
341 }
342
343 if (work < budget) {
344 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
345
346 /* If a packet arrived after we last checked but
347 * before writing RX_ITR, the interrupt will be
348 * delayed, so we retrieve it now.
349 */
350 if (priv->rx_descs[next].report)
351 goto again;
352
353 napi_complete_done(napi, work);
354 }
355
356 return work;
357}
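
The `goto again` above closes a classic NAPI race: after re-arming the interrupt moderation register the ring is checked once more, because a frame that arrived between the last loop iteration and the RX_ITR write would otherwise wait for a delayed interrupt. The general shape of the idiom (schematic; rearm_interrupts() and ring_has_work() are invented helper names):

	if (work < budget) {
		rearm_interrupts(priv);		/* may race with a new frame     */
		if (ring_has_work(priv))	/* so peek at the ring once more */
			goto again;		/* and keep polling              */
		napi_complete_done(napi, work);
	}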
358
359static void __nb8800_tx_dma_start(struct net_device *dev)
360{
361 struct nb8800_priv *priv = netdev_priv(dev);
362 struct nb8800_tx_buf *txb;
363 u32 txc_cr;
364
365 txb = &priv->tx_bufs[priv->tx_queue];
366 if (!txb->ready)
367 return;
368
369 txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
370 if (txc_cr & TCR_EN)
371 return;
372
373 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
374 wmb(); /* ensure desc addr is written before starting DMA */
375 nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
376
377 priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
378}
379
380static void nb8800_tx_dma_start(struct net_device *dev)
381{
382 struct nb8800_priv *priv = netdev_priv(dev);
383
384 spin_lock_irq(&priv->tx_lock);
385 __nb8800_tx_dma_start(dev);
386 spin_unlock_irq(&priv->tx_lock);
387}
388
389static void nb8800_tx_dma_start_irq(struct net_device *dev)
390{
391 struct nb8800_priv *priv = netdev_priv(dev);
392
393 spin_lock(&priv->tx_lock);
394 __nb8800_tx_dma_start(dev);
395 spin_unlock(&priv->tx_lock);
396}
397
398static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
399{
400 struct nb8800_priv *priv = netdev_priv(dev);
401 struct nb8800_tx_desc *txd;
402 struct nb8800_tx_buf *txb;
403 struct nb8800_dma_desc *desc;
404 dma_addr_t dma_addr;
405 unsigned int dma_len;
406 unsigned int align;
407 unsigned int next;
408
409 if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
410 netif_stop_queue(dev);
411 return NETDEV_TX_BUSY;
412 }
413
414 align = (8 - (uintptr_t)skb->data) & 7;
415
416 dma_len = skb->len - align;
417 dma_addr = dma_map_single(&dev->dev, skb->data + align,
418 dma_len, DMA_TO_DEVICE);
419
420 if (dma_mapping_error(&dev->dev, dma_addr)) {
421 netdev_err(dev, "tx dma mapping error\n");
422 kfree_skb(skb);
423 dev->stats.tx_dropped++;
424 return NETDEV_TX_OK;
425 }
426
427 if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
428 netif_stop_queue(dev);
429 skb->xmit_more = 0;
430 }
431
432 next = priv->tx_next;
433 txb = &priv->tx_bufs[next];
434 txd = &priv->tx_descs[next];
435 desc = &txd->desc[0];
436
437 next = (next + 1) % TX_DESC_COUNT;
438
439 if (align) {
440 memcpy(txd->buf, skb->data, align);
441
442 desc->s_addr =
443 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
444 desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
445 desc->config = DESC_BTS(2) | DESC_DS | align;
446
447 desc++;
448 }
449
450 desc->s_addr = dma_addr;
451 desc->n_addr = priv->tx_bufs[next].dma_desc;
452 desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
453
454 if (!skb->xmit_more)
455 desc->config |= DESC_EOC;
456
457 txb->skb = skb;
458 txb->dma_addr = dma_addr;
459 txb->dma_len = dma_len;
460
461 if (!priv->tx_chain) {
462 txb->chain_len = 1;
463 priv->tx_chain = txb;
464 } else {
465 priv->tx_chain->chain_len++;
466 }
467
468 netdev_sent_queue(dev, skb->len);
469
470 priv->tx_next = next;
471
472 if (!skb->xmit_more) {
473 smp_wmb();
474 priv->tx_chain->ready = true;
475 priv->tx_chain = NULL;
476 nb8800_tx_dma_start(dev);
477 }
478
479 return NETDEV_TX_OK;
480}
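
The `(8 - (uintptr_t)skb->data) & 7` computation decides how many leading bytes go through the descriptor's small bounce buffer so that the main DMA transfer starts 8-byte aligned. A runnable check of the arithmetic (plain C, arbitrary sample addresses):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t samples[] = { 0x1000, 0x1001, 0x1007, 0x1008 };

	for (int i = 0; i < 4; i++) {
		unsigned int align = (8 - samples[i]) & 7;

		/* 0x1000 -> 0, 0x1001 -> 7, 0x1007 -> 1, 0x1008 -> 0 */
		printf("%#lx -> align %u\n",
		       (unsigned long)samples[i], align);
	}
	return 0;
}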
481
482static void nb8800_tx_error(struct net_device *dev, u32 report)
483{
484 if (report & TX_LATE_COLLISION)
485 dev->stats.collisions++;
486
487 if (report & TX_PACKET_DROPPED)
488 dev->stats.tx_dropped++;
489
490 if (report & TX_FIFO_UNDERRUN)
491 dev->stats.tx_fifo_errors++;
492
493 dev->stats.tx_errors++;
494}
495
496static void nb8800_tx_done(struct net_device *dev)
497{
498 struct nb8800_priv *priv = netdev_priv(dev);
499 unsigned int limit = priv->tx_next;
500 unsigned int done = priv->tx_done;
501 unsigned int packets = 0;
502 unsigned int len = 0;
503
504 while (done != limit) {
505 struct nb8800_tx_desc *txd = &priv->tx_descs[done];
506 struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
507 struct sk_buff *skb;
508
509 if (!txd->report)
510 break;
511
512 skb = txb->skb;
513 len += skb->len;
514
515 dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
516 DMA_TO_DEVICE);
517
518 if (IS_TX_ERROR(txd->report)) {
519 nb8800_tx_error(dev, txd->report);
520 kfree_skb(skb);
521 } else {
522 consume_skb(skb);
523 }
524
525 dev->stats.tx_packets++;
526 dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
527 dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
528
529 txb->skb = NULL;
530 txb->ready = false;
531 txd->report = 0;
532
533 done = (done + 1) % TX_DESC_COUNT;
534 packets++;
535 }
536
537 if (packets) {
538 smp_mb__before_atomic();
539 atomic_add(packets, &priv->tx_free);
540 netdev_completed_queue(dev, packets, len);
541 netif_wake_queue(dev);
542 priv->tx_done = done;
543 }
544}
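
nb8800_tx_done() is also the completion half of Byte Queue Limits: the netdev_sent_queue() call in nb8800_xmit() must be balanced here by netdev_completed_queue(), and the accounting is reset on open. The pairing, schematically (fragment, not compilable on its own):

	netdev_reset_queue(dev);			/* on open          */

	netdev_sent_queue(dev, skb->len);		/* in xmit          */

	netdev_completed_queue(dev, packets, len);	/* in tx reclaim    */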
545
546static irqreturn_t nb8800_irq(int irq, void *dev_id)
547{
548 struct net_device *dev = dev_id;
549 struct nb8800_priv *priv = netdev_priv(dev);
550 irqreturn_t ret = IRQ_NONE;
551 u32 val;
552
553 /* tx interrupt */
554 val = nb8800_readl(priv, NB8800_TXC_SR);
555 if (val) {
556 nb8800_writel(priv, NB8800_TXC_SR, val);
557
558 if (val & TSR_DI)
559 nb8800_tx_dma_start_irq(dev);
560
561 if (val & TSR_TI)
562 napi_schedule_irqoff(&priv->napi);
563
564 if (unlikely(val & TSR_DE))
565 netdev_err(dev, "TX DMA error\n");
566
567 /* should never happen with automatic status retrieval */
568 if (unlikely(val & TSR_TO))
569 netdev_err(dev, "TX Status FIFO overflow\n");
570
571 ret = IRQ_HANDLED;
572 }
573
574 /* rx interrupt */
575 val = nb8800_readl(priv, NB8800_RXC_SR);
576 if (val) {
577 nb8800_writel(priv, NB8800_RXC_SR, val);
578
579 if (likely(val & (RSR_RI | RSR_DI))) {
580 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
581 napi_schedule_irqoff(&priv->napi);
582 }
583
584 if (unlikely(val & RSR_DE))
585 netdev_err(dev, "RX DMA error\n");
586
587 /* should never happen with automatic status retrieval */
588 if (unlikely(val & RSR_RO))
589 netdev_err(dev, "RX Status FIFO overflow\n");
590
591 ret = IRQ_HANDLED;
592 }
593
594 return ret;
595}
596
597static void nb8800_mac_config(struct net_device *dev)
598{
599 struct nb8800_priv *priv = netdev_priv(dev);
600 bool gigabit = priv->speed == SPEED_1000;
601 u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
602 u32 mac_mode = 0;
603 u32 slot_time;
604 u32 phy_clk;
605 u32 ict;
606
607 if (!priv->duplex)
608 mac_mode |= HALF_DUPLEX;
609
610 if (gigabit) {
611 if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
612 mac_mode |= RGMII_MODE;
613
614 mac_mode |= GMAC_MODE;
615 phy_clk = 125000000;
616
617 /* Should be 512 but register is only 8 bits */
618 slot_time = 255;
619 } else {
620 phy_clk = 25000000;
621 slot_time = 128;
622 }
623
624 ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
625
626 nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
627 nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
628 nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
629}
630
631static void nb8800_pause_config(struct net_device *dev)
632{
633 struct nb8800_priv *priv = netdev_priv(dev);
634 struct phy_device *phydev = priv->phydev;
635 u32 rxcr;
636
637 if (priv->pause_aneg) {
638 if (!phydev || !phydev->link)
639 return;
640
641 priv->pause_rx = phydev->pause;
642 priv->pause_tx = phydev->pause ^ phydev->asym_pause;
643 }
644
645 nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
646
647 rxcr = nb8800_readl(priv, NB8800_RXC_CR);
648 if (!!(rxcr & RCR_FL) == priv->pause_tx)
649 return;
650
651 if (netif_running(dev)) {
652 napi_disable(&priv->napi);
653 netif_tx_lock_bh(dev);
654 nb8800_dma_stop(dev);
655 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
656 nb8800_start_rx(dev);
657 netif_tx_unlock_bh(dev);
658 napi_enable(&priv->napi);
659 } else {
660 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
661 }
662}
663
664static void nb8800_link_reconfigure(struct net_device *dev)
665{
666 struct nb8800_priv *priv = netdev_priv(dev);
667 struct phy_device *phydev = priv->phydev;
668 int change = 0;
669
670 if (phydev->link) {
671 if (phydev->speed != priv->speed) {
672 priv->speed = phydev->speed;
673 change = 1;
674 }
675
676 if (phydev->duplex != priv->duplex) {
677 priv->duplex = phydev->duplex;
678 change = 1;
679 }
680
681 if (change)
682 nb8800_mac_config(dev);
683
684 nb8800_pause_config(dev);
685 }
686
687 if (phydev->link != priv->link) {
688 priv->link = phydev->link;
689 change = 1;
690 }
691
692 if (change)
693 phy_print_status(priv->phydev);
694}
695
696static void nb8800_update_mac_addr(struct net_device *dev)
697{
698 struct nb8800_priv *priv = netdev_priv(dev);
699 int i;
700
701 for (i = 0; i < ETH_ALEN; i++)
702 nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
703
704 for (i = 0; i < ETH_ALEN; i++)
705 nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
706}
707
708static int nb8800_set_mac_address(struct net_device *dev, void *addr)
709{
710 struct sockaddr *sock = addr;
711
712 if (netif_running(dev))
713 return -EBUSY;
714
715 ether_addr_copy(dev->dev_addr, sock->sa_data);
716 nb8800_update_mac_addr(dev);
717
718 return 0;
719}
720
721static void nb8800_mc_init(struct net_device *dev, int val)
722{
723 struct nb8800_priv *priv = netdev_priv(dev);
724
725 nb8800_writeb(priv, NB8800_MC_INIT, val);
726 readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
727 1, 1000);
728}
729
730static void nb8800_set_rx_mode(struct net_device *dev)
731{
732 struct nb8800_priv *priv = netdev_priv(dev);
733 struct netdev_hw_addr *ha;
734 int i;
735
736 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
737 nb8800_mac_af(dev, false);
738 return;
739 }
740
741 nb8800_mac_af(dev, true);
742 nb8800_mc_init(dev, 0);
743
744 netdev_for_each_mc_addr(ha, dev) {
745 for (i = 0; i < ETH_ALEN; i++)
746 nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
747
748 nb8800_mc_init(dev, 0xff);
749 }
750}
751
752#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
753#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
754
755static void nb8800_dma_free(struct net_device *dev)
756{
757 struct nb8800_priv *priv = netdev_priv(dev);
758 unsigned int i;
759
760 if (priv->rx_bufs) {
761 for (i = 0; i < RX_DESC_COUNT; i++)
762 if (priv->rx_bufs[i].page)
763 put_page(priv->rx_bufs[i].page);
764
765 kfree(priv->rx_bufs);
766 priv->rx_bufs = NULL;
767 }
768
769 if (priv->tx_bufs) {
770 for (i = 0; i < TX_DESC_COUNT; i++)
771 kfree_skb(priv->tx_bufs[i].skb);
772
773 kfree(priv->tx_bufs);
774 priv->tx_bufs = NULL;
775 }
776
777 if (priv->rx_descs) {
778 dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
779 priv->rx_desc_dma);
780 priv->rx_descs = NULL;
781 }
782
783 if (priv->tx_descs) {
784 dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
785 priv->tx_desc_dma);
786 priv->tx_descs = NULL;
787 }
788}
789
790static void nb8800_dma_reset(struct net_device *dev)
791{
792 struct nb8800_priv *priv = netdev_priv(dev);
793 struct nb8800_rx_desc *rxd;
794 struct nb8800_tx_desc *txd;
795 unsigned int i;
796
797 for (i = 0; i < RX_DESC_COUNT; i++) {
798 dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
799
800 rxd = &priv->rx_descs[i];
801 rxd->desc.n_addr = rx_dma + sizeof(*rxd);
802 rxd->desc.r_addr =
803 rx_dma + offsetof(struct nb8800_rx_desc, report);
804 rxd->desc.config = priv->rx_dma_config;
805 rxd->report = 0;
806 }
807
808 rxd->desc.n_addr = priv->rx_desc_dma;
809 rxd->desc.config |= DESC_EOC;
810
811 priv->rx_eoc = RX_DESC_COUNT - 1;
812
813 for (i = 0; i < TX_DESC_COUNT; i++) {
814 struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
815 dma_addr_t r_dma = txb->dma_desc +
816 offsetof(struct nb8800_tx_desc, report);
817
818 txd = &priv->tx_descs[i];
819 txd->desc[0].r_addr = r_dma;
820 txd->desc[1].r_addr = r_dma;
821 txd->report = 0;
822 }
823
824 priv->tx_next = 0;
825 priv->tx_queue = 0;
826 priv->tx_done = 0;
827 atomic_set(&priv->tx_free, TX_DESC_COUNT);
828
829 nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
830
831 wmb(); /* ensure all setup is written before starting */
832}
833
834static int nb8800_dma_init(struct net_device *dev)
835{
836 struct nb8800_priv *priv = netdev_priv(dev);
837 unsigned int n_rx = RX_DESC_COUNT;
838 unsigned int n_tx = TX_DESC_COUNT;
839 unsigned int i;
840 int err;
841
842 priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
843 &priv->rx_desc_dma, GFP_KERNEL);
844 if (!priv->rx_descs)
845 goto err_out;
846
847 priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
848 if (!priv->rx_bufs)
849 goto err_out;
850
851 for (i = 0; i < n_rx; i++) {
852 err = nb8800_alloc_rx(dev, i, false);
853 if (err)
854 goto err_out;
855 }
856
857 priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
858 &priv->tx_desc_dma, GFP_KERNEL);
859 if (!priv->tx_descs)
860 goto err_out;
861
862 priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
863 if (!priv->tx_bufs)
864 goto err_out;
865
866 for (i = 0; i < n_tx; i++)
867 priv->tx_bufs[i].dma_desc =
868 priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
869
870 nb8800_dma_reset(dev);
871
872 return 0;
873
874err_out:
875 nb8800_dma_free(dev);
876
877 return -ENOMEM;
878}
879
880static int nb8800_dma_stop(struct net_device *dev)
881{
882 struct nb8800_priv *priv = netdev_priv(dev);
883 struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
884 struct nb8800_tx_desc *txd = &priv->tx_descs[0];
885 int retry = 5;
886 u32 txcr;
887 u32 rxcr;
888 int err;
889 unsigned int i;
890
891 /* wait for tx to finish */
892 err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
893 !(txcr & TCR_EN) &&
894 priv->tx_done == priv->tx_next,
895 1000, 1000000);
896 if (err)
897 return err;
898
899 /* The rx DMA only stops if it reaches the end of chain.
900 * To make this happen, we set the EOC flag on all rx
901 * descriptors, put the device in loopback mode, and send
902 * a few dummy frames. The interrupt handler will ignore
903 * these since NAPI is disabled and no real frames are in
904 * the tx queue.
905 */
906
907 for (i = 0; i < RX_DESC_COUNT; i++)
908 priv->rx_descs[i].desc.config |= DESC_EOC;
909
910 txd->desc[0].s_addr =
911 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
912 txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
913 memset(txd->buf, 0, sizeof(txd->buf));
914
915 nb8800_mac_af(dev, false);
916 nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
917
918 do {
919 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
920 wmb();
921 nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
922
923 err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
924 rxcr, !(rxcr & RCR_EN),
925 1000, 100000);
926 } while (err && --retry);
927
928 nb8800_mac_af(dev, true);
929 nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
930 nb8800_dma_reset(dev);
931
932 return retry ? 0 : -ETIMEDOUT;
933}
934
935static void nb8800_pause_adv(struct net_device *dev)
936{
937 struct nb8800_priv *priv = netdev_priv(dev);
938 u32 adv = 0;
939
940 if (!priv->phydev)
941 return;
942
943 if (priv->pause_rx)
944 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
945 if (priv->pause_tx)
946 adv ^= ADVERTISED_Asym_Pause;
947
948 priv->phydev->supported |= adv;
949 priv->phydev->advertising |= adv;
950}
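
The OR-then-XOR in nb8800_pause_adv() encodes the usual 802.3 pause advertisement: rx-only wants Pause plus Asym_Pause, tx-only wants Asym_Pause alone, and symmetric rx+tx wants Pause alone. A runnable truth table (plain C; PAUSE/ASYM are arbitrary stand-in bit values for ADVERTISED_Pause / ADVERTISED_Asym_Pause):

#include <stdio.h>

#define PAUSE	0x1	/* stands in for ADVERTISED_Pause      */
#define ASYM	0x2	/* stands in for ADVERTISED_Asym_Pause */

int main(void)
{
	for (int rx = 0; rx <= 1; rx++)
		for (int tx = 0; tx <= 1; tx++) {
			unsigned int adv = 0;

			if (rx)
				adv |= PAUSE | ASYM;
			if (tx)
				adv ^= ASYM;
			/* rx=1,tx=1 -> PAUSE; rx=1,tx=0 -> PAUSE|ASYM;
			 * rx=0,tx=1 -> ASYM;  rx=0,tx=0 -> 0
			 */
			printf("rx=%d tx=%d adv=%#x\n", rx, tx, adv);
		}
	return 0;
}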
951
952static int nb8800_open(struct net_device *dev)
953{
954 struct nb8800_priv *priv = netdev_priv(dev);
955 int err;
956
957 /* clear any pending interrupts */
958 nb8800_writel(priv, NB8800_RXC_SR, 0xf);
959 nb8800_writel(priv, NB8800_TXC_SR, 0xf);
960
961 err = nb8800_dma_init(dev);
962 if (err)
963 return err;
964
965 err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
966 if (err)
967 goto err_free_dma;
968
969 nb8800_mac_rx(dev, true);
970 nb8800_mac_tx(dev, true);
971
972 priv->phydev = of_phy_connect(dev, priv->phy_node,
973 nb8800_link_reconfigure, 0,
974 priv->phy_mode);
975 if (!priv->phydev)
976 goto err_free_irq;
977
978 nb8800_pause_adv(dev);
979
980 netdev_reset_queue(dev);
981 napi_enable(&priv->napi);
982 netif_start_queue(dev);
983
984 nb8800_start_rx(dev);
985 phy_start(priv->phydev);
986
987 return 0;
988
989err_free_irq:
990 free_irq(dev->irq, dev);
991err_free_dma:
992 nb8800_dma_free(dev);
993
994 return err;
995}
996
997static int nb8800_stop(struct net_device *dev)
998{
999 struct nb8800_priv *priv = netdev_priv(dev);
1000
1001 phy_stop(priv->phydev);
1002
1003 netif_stop_queue(dev);
1004 napi_disable(&priv->napi);
1005
1006 nb8800_dma_stop(dev);
1007 nb8800_mac_rx(dev, false);
1008 nb8800_mac_tx(dev, false);
1009
1010 phy_disconnect(priv->phydev);
1011 priv->phydev = NULL;
1012
1013 free_irq(dev->irq, dev);
1014
1015 nb8800_dma_free(dev);
1016
1017 return 0;
1018}
1019
1020static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1021{
1022 struct nb8800_priv *priv = netdev_priv(dev);
1023
1024 return phy_mii_ioctl(priv->phydev, rq, cmd);
1025}
1026
1027static const struct net_device_ops nb8800_netdev_ops = {
1028 .ndo_open = nb8800_open,
1029 .ndo_stop = nb8800_stop,
1030 .ndo_start_xmit = nb8800_xmit,
1031 .ndo_set_mac_address = nb8800_set_mac_address,
1032 .ndo_set_rx_mode = nb8800_set_rx_mode,
1033 .ndo_do_ioctl = nb8800_ioctl,
1034 .ndo_change_mtu = eth_change_mtu,
1035 .ndo_validate_addr = eth_validate_addr,
1036};
1037
1038static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1039{
1040 struct nb8800_priv *priv = netdev_priv(dev);
1041
1042 if (!priv->phydev)
1043 return -ENODEV;
1044
1045 return phy_ethtool_gset(priv->phydev, cmd);
1046}
1047
1048static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1049{
1050 struct nb8800_priv *priv = netdev_priv(dev);
1051
1052 if (!priv->phydev)
1053 return -ENODEV;
1054
1055 return phy_ethtool_sset(priv->phydev, cmd);
1056}
1057
1058static int nb8800_nway_reset(struct net_device *dev)
1059{
1060 struct nb8800_priv *priv = netdev_priv(dev);
1061
1062 if (!priv->phydev)
1063 return -ENODEV;
1064
1065 return genphy_restart_aneg(priv->phydev);
1066}
1067
1068static void nb8800_get_pauseparam(struct net_device *dev,
1069 struct ethtool_pauseparam *pp)
1070{
1071 struct nb8800_priv *priv = netdev_priv(dev);
1072
1073 pp->autoneg = priv->pause_aneg;
1074 pp->rx_pause = priv->pause_rx;
1075 pp->tx_pause = priv->pause_tx;
1076}
1077
1078static int nb8800_set_pauseparam(struct net_device *dev,
1079 struct ethtool_pauseparam *pp)
1080{
1081 struct nb8800_priv *priv = netdev_priv(dev);
1082
1083 priv->pause_aneg = pp->autoneg;
1084 priv->pause_rx = pp->rx_pause;
1085 priv->pause_tx = pp->tx_pause;
1086
1087 nb8800_pause_adv(dev);
1088
1089 if (!priv->pause_aneg)
1090 nb8800_pause_config(dev);
1091 else if (priv->phydev)
1092 phy_start_aneg(priv->phydev);
1093
1094 return 0;
1095}
1096
1097static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
1098 "rx_bytes_ok",
1099 "rx_frames_ok",
1100 "rx_undersize_frames",
1101 "rx_fragment_frames",
1102 "rx_64_byte_frames",
1103 "rx_127_byte_frames",
1104 "rx_255_byte_frames",
1105 "rx_511_byte_frames",
1106 "rx_1023_byte_frames",
1107 "rx_max_size_frames",
1108 "rx_oversize_frames",
1109 "rx_bad_fcs_frames",
1110 "rx_broadcast_frames",
1111 "rx_multicast_frames",
1112 "rx_control_frames",
1113 "rx_pause_frames",
1114 "rx_unsup_control_frames",
1115 "rx_align_error_frames",
1116 "rx_overrun_frames",
1117 "rx_jabber_frames",
1118 "rx_bytes",
1119 "rx_frames",
1120
1121 "tx_bytes_ok",
1122 "tx_frames_ok",
1123 "tx_64_byte_frames",
1124 "tx_127_byte_frames",
1125 "tx_255_byte_frames",
1126 "tx_511_byte_frames",
1127 "tx_1023_byte_frames",
1128 "tx_max_size_frames",
1129 "tx_oversize_frames",
1130 "tx_broadcast_frames",
1131 "tx_multicast_frames",
1132 "tx_control_frames",
1133 "tx_pause_frames",
1134 "tx_underrun_frames",
1135 "tx_single_collision_frames",
1136 "tx_multi_collision_frames",
1137 "tx_deferred_collision_frames",
1138 "tx_late_collision_frames",
1139 "tx_excessive_collision_frames",
1140 "tx_bytes",
1141 "tx_frames",
1142 "tx_collisions",
1143};
1144
1145#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
1146
1147static int nb8800_get_sset_count(struct net_device *dev, int sset)
1148{
1149 if (sset == ETH_SS_STATS)
1150 return NB8800_NUM_STATS;
1151
1152 return -EOPNOTSUPP;
1153}
1154
1155static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
1156{
1157 if (sset == ETH_SS_STATS)
1158 memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
1159}
1160
1161static u32 nb8800_read_stat(struct net_device *dev, int index)
1162{
1163 struct nb8800_priv *priv = netdev_priv(dev);
1164
1165 nb8800_writeb(priv, NB8800_STAT_INDEX, index);
1166
1167 return nb8800_readl(priv, NB8800_STAT_DATA);
1168}
1169
1170static void nb8800_get_ethtool_stats(struct net_device *dev,
1171 struct ethtool_stats *estats, u64 *st)
1172{
1173 unsigned int i;
1174 u32 rx, tx;
1175
1176 for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
1177 rx = nb8800_read_stat(dev, i);
1178 tx = nb8800_read_stat(dev, i | 0x80);
1179 st[i] = rx;
1180 st[i + NB8800_NUM_STATS / 2] = tx;
1181 }
1182}
1183
1184static const struct ethtool_ops nb8800_ethtool_ops = {
1185 .get_settings = nb8800_get_settings,
1186 .set_settings = nb8800_set_settings,
1187 .nway_reset = nb8800_nway_reset,
1188 .get_link = ethtool_op_get_link,
1189 .get_pauseparam = nb8800_get_pauseparam,
1190 .set_pauseparam = nb8800_set_pauseparam,
1191 .get_sset_count = nb8800_get_sset_count,
1192 .get_strings = nb8800_get_strings,
1193 .get_ethtool_stats = nb8800_get_ethtool_stats,
1194};
1195
1196static int nb8800_hw_init(struct net_device *dev)
1197{
1198 struct nb8800_priv *priv = netdev_priv(dev);
1199 u32 val;
1200
1201 val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
1202 nb8800_writeb(priv, NB8800_TX_CTL1, val);
1203
1204 /* Collision retry count */
1205 nb8800_writeb(priv, NB8800_TX_CTL2, 5);
1206
1207 val = RX_PAD_STRIP | RX_AF_EN;
1208 nb8800_writeb(priv, NB8800_RX_CTL, val);
1209
1210 /* Chosen by fair dice roll */
1211 nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
1212
1213 /* TX cycles per deferral period */
1214 nb8800_writeb(priv, NB8800_TX_SDP, 12);
1215
1216 /* The following three threshold values have been
1217 * experimentally determined for good results.
1218 */
1219
1220 /* RX/TX FIFO threshold for partial empty (64-bit entries) */
1221 nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
1222
1223 /* RX/TX FIFO threshold for partial full (64-bit entries) */
1224 nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
1225
1226 /* Buffer size for transmit (64-bit entries) */
1227 nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
1228
1229 /* Configure tx DMA */
1230
1231 val = nb8800_readl(priv, NB8800_TXC_CR);
1232 val &= TCR_LE; /* keep endian setting */
1233 val |= TCR_DM; /* DMA descriptor mode */
1234 val |= TCR_RS; /* automatically store tx status */
1235 val |= TCR_DIE; /* interrupt on DMA chain completion */
1236 val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
1237 val |= TCR_BTS(2); /* 32-byte bus transaction size */
1238 nb8800_writel(priv, NB8800_TXC_CR, val);
1239
1240 /* TX complete interrupt after 10 ms or 7 frames (see above) */
1241 val = clk_get_rate(priv->clk) / 100;
1242 nb8800_writel(priv, NB8800_TX_ITR, val);
1243
1244 /* Configure rx DMA */
1245
1246 val = nb8800_readl(priv, NB8800_RXC_CR);
1247 val &= RCR_LE; /* keep endian setting */
1248 val |= RCR_DM; /* DMA descriptor mode */
1249 val |= RCR_RS; /* automatically store rx status */
1250 val |= RCR_DIE; /* interrupt at end of DMA chain */
1251 val |= RCR_RFI(7); /* interrupt after 7 frames received */
1252 val |= RCR_BTS(2); /* 32-byte bus transaction size */
1253 nb8800_writel(priv, NB8800_RXC_CR, val);
1254
1255 /* The rx interrupt can fire before the DMA has completed
1256 * unless a small delay is added. 50 us is hopefully enough.
1257 */
1258 priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
1259
1260 /* In NAPI poll mode we want to disable interrupts, but the
1261 * hardware does not permit this. Delay 10 ms instead.
1262 */
1263 priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
1264
1265 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
1266
1267 priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
1268
1269 /* Flow control settings */
1270
1271 /* Pause time of 0.1 ms */
1272 val = 100000 / 512;
1273 nb8800_writeb(priv, NB8800_PQ1, val >> 8);
1274 nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
1275
1276 /* Auto-negotiate by default */
1277 priv->pause_aneg = true;
1278 priv->pause_rx = true;
1279 priv->pause_tx = true;
1280
1281 nb8800_mc_init(dev, 0);
1282
1283 return 0;
1284}
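
The two ITR values are plain clock arithmetic: the throttle registers count controller clock cycles, so the 50 us interrupt delay is clk/20000 and the 10 ms poll-mode delay is clk/100. A runnable check with an assumed 250 MHz clock (the real rate is board-specific):

#include <stdio.h>

int main(void)
{
	unsigned long clk = 250000000;		/* assumed rate, Hz */

	unsigned long itr_irq  = clk / 20000;	/* 50 us -> 12500 cycles    */
	unsigned long itr_poll = clk / 100;	/* 10 ms -> 2500000 cycles  */

	printf("irq: %lu cycles, poll: %lu cycles\n", itr_irq, itr_poll);
	return 0;
}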
1285
1286static int nb8800_tangox_init(struct net_device *dev)
1287{
1288 struct nb8800_priv *priv = netdev_priv(dev);
1289 u32 pad_mode = PAD_MODE_MII;
1290
1291 switch (priv->phy_mode) {
1292 case PHY_INTERFACE_MODE_MII:
1293 case PHY_INTERFACE_MODE_GMII:
1294 pad_mode = PAD_MODE_MII;
1295 break;
1296
1297 case PHY_INTERFACE_MODE_RGMII:
1298 pad_mode = PAD_MODE_RGMII;
1299 break;
1300
1301 case PHY_INTERFACE_MODE_RGMII_TXID:
1302 pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
1303 break;
1304
1305 default:
1306 dev_err(dev->dev.parent, "unsupported phy mode %s\n",
1307 phy_modes(priv->phy_mode));
1308 return -EINVAL;
1309 }
1310
1311 nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
1312
1313 return 0;
1314}
1315
1316static int nb8800_tangox_reset(struct net_device *dev)
1317{
1318 struct nb8800_priv *priv = netdev_priv(dev);
1319 int clk_div;
1320
1321 nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
1322 usleep_range(1000, 10000);
1323 nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
1324
1325 wmb(); /* ensure reset is cleared before proceeding */
1326
1327 clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
1328 nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
1329
1330 return 0;
1331}
1332
1333static const struct nb8800_ops nb8800_tangox_ops = {
1334 .init = nb8800_tangox_init,
1335 .reset = nb8800_tangox_reset,
1336};
1337
1338static int nb8800_tango4_init(struct net_device *dev)
1339{
1340 struct nb8800_priv *priv = netdev_priv(dev);
1341 int err;
1342
1343 err = nb8800_tangox_init(dev);
1344 if (err)
1345 return err;
1346
1347 /* On tango4 interrupt on DMA completion per frame works and gives
1348 * better performance despite generating more rx interrupts.
1349 */
1350
1351 /* Disable unnecessary interrupt on rx completion */
1352 nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
1353
1354 /* Request interrupt on descriptor DMA completion */
1355 priv->rx_dma_config |= DESC_ID;
1356
1357 return 0;
1358}
1359
1360static const struct nb8800_ops nb8800_tango4_ops = {
1361 .init = nb8800_tango4_init,
1362 .reset = nb8800_tangox_reset,
1363};
1364
1365static const struct of_device_id nb8800_dt_ids[] = {
1366 {
1367 .compatible = "aurora,nb8800",
1368 },
1369 {
1370 .compatible = "sigma,smp8642-ethernet",
1371 .data = &nb8800_tangox_ops,
1372 },
1373 {
1374 .compatible = "sigma,smp8734-ethernet",
1375 .data = &nb8800_tango4_ops,
1376 },
1377 { }
1378};
1379
1380static int nb8800_probe(struct platform_device *pdev)
1381{
1382 const struct of_device_id *match;
1383 const struct nb8800_ops *ops = NULL;
1384 struct nb8800_priv *priv;
1385 struct resource *res;
1386 struct net_device *dev;
1387 struct mii_bus *bus;
1388 const unsigned char *mac;
1389 void __iomem *base;
1390 int irq;
1391 int ret;
1392
1393 match = of_match_device(nb8800_dt_ids, &pdev->dev);
1394 if (match)
1395 ops = match->data;
1396
1397 irq = platform_get_irq(pdev, 0);
1398 if (irq <= 0) {
1399 dev_err(&pdev->dev, "No IRQ\n");
1400 return -EINVAL;
1401 }
1402
1403 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1404 base = devm_ioremap_resource(&pdev->dev, res);
1405 if (IS_ERR(base))
1406 return PTR_ERR(base);
1407
1408 dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
1409
1410 dev = alloc_etherdev(sizeof(*priv));
1411 if (!dev)
1412 return -ENOMEM;
1413
1414 platform_set_drvdata(pdev, dev);
1415 SET_NETDEV_DEV(dev, &pdev->dev);
1416
1417 priv = netdev_priv(dev);
1418 priv->base = base;
1419
1420 priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
1421 if (priv->phy_mode < 0)
1422 priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
1423
1424 priv->clk = devm_clk_get(&pdev->dev, NULL);
1425 if (IS_ERR(priv->clk)) {
1426 dev_err(&pdev->dev, "failed to get clock\n");
1427 ret = PTR_ERR(priv->clk);
1428 goto err_free_dev;
1429 }
1430
1431 ret = clk_prepare_enable(priv->clk);
1432 if (ret)
1433 goto err_free_dev;
1434
1435 spin_lock_init(&priv->tx_lock);
1436
1437 if (ops && ops->reset) {
1438 ret = ops->reset(dev);
1439 if (ret)
1440			goto err_disable_clk;
1441 }
1442
1443 bus = devm_mdiobus_alloc(&pdev->dev);
1444 if (!bus) {
1445 ret = -ENOMEM;
1446 goto err_disable_clk;
1447 }
1448
1449 bus->name = "nb8800-mii";
1450 bus->read = nb8800_mdio_read;
1451 bus->write = nb8800_mdio_write;
1452 bus->parent = &pdev->dev;
1453 snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
1454 (unsigned long)res->start);
1455 bus->priv = priv;
1456
1457 ret = of_mdiobus_register(bus, pdev->dev.of_node);
1458 if (ret) {
1459 dev_err(&pdev->dev, "failed to register MII bus\n");
1460 goto err_disable_clk;
1461 }
1462
1463 priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1464 if (!priv->phy_node) {
1465 dev_err(&pdev->dev, "no PHY specified\n");
1466 ret = -ENODEV;
1467 goto err_free_bus;
1468 }
1469
1470 priv->mii_bus = bus;
1471
1472 ret = nb8800_hw_init(dev);
1473 if (ret)
1474 goto err_free_bus;
1475
1476 if (ops && ops->init) {
1477 ret = ops->init(dev);
1478 if (ret)
1479 goto err_free_bus;
1480 }
1481
1482 dev->netdev_ops = &nb8800_netdev_ops;
1483 dev->ethtool_ops = &nb8800_ethtool_ops;
1484 dev->flags |= IFF_MULTICAST;
1485 dev->irq = irq;
1486
1487 mac = of_get_mac_address(pdev->dev.of_node);
1488 if (mac)
1489 ether_addr_copy(dev->dev_addr, mac);
1490
1491 if (!is_valid_ether_addr(dev->dev_addr))
1492 eth_hw_addr_random(dev);
1493
1494 nb8800_update_mac_addr(dev);
1495
1496 netif_carrier_off(dev);
1497
1498 ret = register_netdev(dev);
1499 if (ret) {
1500 netdev_err(dev, "failed to register netdev\n");
1501 goto err_free_dma;
1502 }
1503
1504 netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
1505
1506 netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
1507
1508 return 0;
1509
1510err_free_dma:
1511 nb8800_dma_free(dev);
1512err_free_bus:
1513 mdiobus_unregister(bus);
1514err_disable_clk:
1515 clk_disable_unprepare(priv->clk);
1516err_free_dev:
1517 free_netdev(dev);
1518
1519 return ret;
1520}
1521
1522static int nb8800_remove(struct platform_device *pdev)
1523{
1524 struct net_device *ndev = platform_get_drvdata(pdev);
1525 struct nb8800_priv *priv = netdev_priv(ndev);
1526
1527 unregister_netdev(ndev);
1528
1529 mdiobus_unregister(priv->mii_bus);
1530
1531 clk_disable_unprepare(priv->clk);
1532
1533 nb8800_dma_free(ndev);
1534 free_netdev(ndev);
1535
1536 return 0;
1537}
1538
1539static struct platform_driver nb8800_driver = {
1540 .driver = {
1541 .name = "nb8800",
1542 .of_match_table = nb8800_dt_ids,
1543 },
1544 .probe = nb8800_probe,
1545 .remove = nb8800_remove,
1546};
1547
1548module_platform_driver(nb8800_driver);
1549
1550MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
1551MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
1552MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
new file mode 100644
index 000000000000..e5adbc2aac9f
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -0,0 +1,316 @@
1#ifndef _NB8800_H_
2#define _NB8800_H_
3
4#include <linux/types.h>
5#include <linux/skbuff.h>
6#include <linux/phy.h>
7#include <linux/clk.h>
8#include <linux/bitops.h>
9
10#define RX_DESC_COUNT 256
11#define TX_DESC_COUNT 256
12
13#define NB8800_DESC_LOW 4
14
15#define RX_BUF_SIZE 1552
16
17#define RX_COPYBREAK 256
18#define RX_COPYHDR 128
19
20#define MAX_MDC_CLOCK 2500000
21
22/* Stargate Solutions SSN8800 core registers */
23#define NB8800_TX_CTL1 0x000
24#define TX_TPD BIT(5)
25#define TX_APPEND_FCS BIT(4)
26#define TX_PAD_EN BIT(3)
27#define TX_RETRY_EN BIT(2)
28#define TX_EN BIT(0)
29
30#define NB8800_TX_CTL2 0x001
31
32#define NB8800_RX_CTL 0x004
33#define RX_BC_DISABLE BIT(7)
34#define RX_RUNT BIT(6)
35#define RX_AF_EN BIT(5)
36#define RX_PAUSE_EN BIT(3)
37#define RX_SEND_CRC BIT(2)
38#define RX_PAD_STRIP BIT(1)
39#define RX_EN BIT(0)
40
41#define NB8800_RANDOM_SEED 0x008
42#define NB8800_TX_SDP 0x14
43#define NB8800_TX_TPDP1 0x18
44#define NB8800_TX_TPDP2 0x19
45#define NB8800_SLOT_TIME 0x1c
46
47#define NB8800_MDIO_CMD 0x020
48#define MDIO_CMD_GO BIT(31)
49#define MDIO_CMD_WR BIT(26)
50#define MDIO_CMD_ADDR(x) ((x) << 21)
51#define MDIO_CMD_REG(x) ((x) << 16)
52#define MDIO_CMD_DATA(x) ((x) << 0)
53
54#define NB8800_MDIO_STS 0x024
55#define MDIO_STS_ERR BIT(31)
56
57#define NB8800_MC_ADDR(i) (0x028 + (i))
58#define NB8800_MC_INIT 0x02e
59#define NB8800_UC_ADDR(i) (0x03c + (i))
60
61#define NB8800_MAC_MODE 0x044
62#define RGMII_MODE BIT(7)
63#define HALF_DUPLEX BIT(4)
64#define BURST_EN BIT(3)
65#define LOOPBACK_EN BIT(2)
66#define GMAC_MODE BIT(0)
67
68#define NB8800_IC_THRESHOLD 0x050
69#define NB8800_PE_THRESHOLD 0x051
70#define NB8800_PF_THRESHOLD 0x052
71#define NB8800_TX_BUFSIZE 0x054
72#define NB8800_FIFO_CTL 0x056
73#define NB8800_PQ1 0x060
74#define NB8800_PQ2 0x061
75#define NB8800_SRC_ADDR(i) (0x06a + (i))
76#define NB8800_STAT_DATA 0x078
77#define NB8800_STAT_INDEX 0x07c
78#define NB8800_STAT_CLEAR 0x07d
79
80#define NB8800_SLEEP_MODE 0x07e
81#define SLEEP_MODE BIT(0)
82
83#define NB8800_WAKEUP 0x07f
84#define WAKEUP BIT(0)
85
86/* Aurora NB8800 host interface registers */
87#define NB8800_TXC_CR 0x100
88#define TCR_LK BIT(12)
89#define TCR_DS BIT(11)
90#define TCR_BTS(x) (((x) & 0x7) << 8)
91#define TCR_DIE BIT(7)
92#define TCR_TFI(x) (((x) & 0x7) << 4)
93#define TCR_LE BIT(3)
94#define TCR_RS BIT(2)
95#define TCR_DM BIT(1)
96#define TCR_EN BIT(0)
97
98#define NB8800_TXC_SR 0x104
99#define TSR_DE BIT(3)
100#define TSR_DI BIT(2)
101#define TSR_TO BIT(1)
102#define TSR_TI BIT(0)
103
104#define NB8800_TX_SAR 0x108
105#define NB8800_TX_DESC_ADDR 0x10c
106
107#define NB8800_TX_REPORT_ADDR 0x110
108#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff)
109#define TX_FIRST_DEFERRAL BIT(7)
110#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf)
111#define TX_LATE_COLLISION BIT(2)
112#define TX_PACKET_DROPPED BIT(1)
113#define TX_FIFO_UNDERRUN BIT(0)
114#define IS_TX_ERROR(r) ((r) & 0x07)
115
116#define NB8800_TX_FIFO_SR 0x114
117#define NB8800_TX_ITR 0x118
118
119#define NB8800_RXC_CR 0x200
120#define RCR_FL BIT(13)
121#define RCR_LK BIT(12)
122#define RCR_DS BIT(11)
123#define RCR_BTS(x) (((x) & 7) << 8)
124#define RCR_DIE BIT(7)
125#define RCR_RFI(x) (((x) & 7) << 4)
126#define RCR_LE BIT(3)
127#define RCR_RS BIT(2)
128#define RCR_DM BIT(1)
129#define RCR_EN BIT(0)
130
131#define NB8800_RXC_SR 0x204
132#define RSR_DE BIT(3)
133#define RSR_DI BIT(2)
134#define RSR_RO BIT(1)
135#define RSR_RI BIT(0)
136
137#define NB8800_RX_SAR 0x208
138#define NB8800_RX_DESC_ADDR 0x20c
139
140#define NB8800_RX_REPORT_ADDR 0x210
141#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF)
142#define RX_MULTICAST_PKT BIT(9)
143#define RX_BROADCAST_PKT BIT(8)
144#define RX_LENGTH_ERR BIT(7)
145#define RX_FCS_ERR BIT(6)
146#define RX_RUNT_PKT BIT(5)
147#define RX_FIFO_OVERRUN BIT(4)
148#define RX_LATE_COLLISION BIT(3)
149#define RX_ALIGNMENT_ERROR BIT(2)
150#define RX_ERROR_MASK 0xfc
151#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK)
152
153#define NB8800_RX_FIFO_SR 0x214
154#define NB8800_RX_ITR 0x218
155
156/* Sigma Designs SMP86xx additional registers */
157#define NB8800_TANGOX_PAD_MODE 0x400
158#define PAD_MODE_MASK 0x7
159#define PAD_MODE_MII 0x0
160#define PAD_MODE_RGMII 0x1
161#define PAD_MODE_GTX_CLK_INV BIT(3)
162#define PAD_MODE_GTX_CLK_DELAY BIT(4)
163
164#define NB8800_TANGOX_MDIO_CLKDIV 0x420
165#define NB8800_TANGOX_RESET 0x424
166
167/* Hardware DMA descriptor */
168struct nb8800_dma_desc {
169 u32 s_addr; /* start address */
170 u32 n_addr; /* next descriptor address */
171 u32 r_addr; /* report address */
172 u32 config;
173} __aligned(8);
174
175#define DESC_ID BIT(23)
176#define DESC_EOC BIT(22)
177#define DESC_EOF BIT(21)
178#define DESC_LK BIT(20)
179#define DESC_DS BIT(19)
180#define DESC_BTS(x) (((x) & 0x7) << 16)
181
182/* DMA descriptor and associated data for rx.
183 * Allocated from coherent memory.
184 */
185struct nb8800_rx_desc {
186 /* DMA descriptor */
187 struct nb8800_dma_desc desc;
188
189 /* Status report filled in by hardware */
190 u32 report;
191};
192
193/* Address of buffer on rx ring */
194struct nb8800_rx_buf {
195 struct page *page;
196 unsigned long offset;
197};
198
199/* DMA descriptors and associated data for tx.
200 * Allocated from coherent memory.
201 */
202struct nb8800_tx_desc {
203 /* DMA descriptor. The second descriptor is used if packet
204 * data is unaligned.
205 */
206 struct nb8800_dma_desc desc[2];
207
208 /* Status report filled in by hardware */
209 u32 report;
210
211 /* Bounce buffer for initial unaligned part of packet */
212 u8 buf[8] __aligned(8);
213};
214
215/* Packet in tx queue */
216struct nb8800_tx_buf {
217 /* Currently queued skb */
218 struct sk_buff *skb;
219
220 /* DMA address of the first descriptor */
221 dma_addr_t dma_desc;
222
223 /* DMA address of packet data */
224 dma_addr_t dma_addr;
225
226 /* Length of DMA mapping, less than skb->len if alignment
227 * buffer is used.
228 */
229 unsigned int dma_len;
230
231 /* Number of packets in chain starting here */
232 unsigned int chain_len;
233
234 /* Packet chain ready to be submitted to hardware */
235 bool ready;
236};
237
238struct nb8800_priv {
239 struct napi_struct napi;
240
241 void __iomem *base;
242
243 /* RX DMA descriptors */
244 struct nb8800_rx_desc *rx_descs;
245
246 /* RX buffers referenced by DMA descriptors */
247 struct nb8800_rx_buf *rx_bufs;
248
249 /* Current end of chain */
250 u32 rx_eoc;
251
252 /* Value for rx interrupt time register in NAPI interrupt mode */
253 u32 rx_itr_irq;
254
255 /* Value for rx interrupt time register in NAPI poll mode */
256 u32 rx_itr_poll;
257
258 /* Value for config field of rx DMA descriptors */
259 u32 rx_dma_config;
260
261 /* TX DMA descriptors */
262 struct nb8800_tx_desc *tx_descs;
263
264 /* TX packet queue */
265 struct nb8800_tx_buf *tx_bufs;
266
267 /* Number of free tx queue entries */
268 atomic_t tx_free;
269
270 /* First free tx queue entry */
271 u32 tx_next;
272
273 /* Next buffer to transmit */
274 u32 tx_queue;
275
276 /* Start of current packet chain */
277 struct nb8800_tx_buf *tx_chain;
278
279 /* Next buffer to reclaim */
280 u32 tx_done;
281
282 /* Lock for DMA activation */
283 spinlock_t tx_lock;
284
285 struct mii_bus *mii_bus;
286 struct device_node *phy_node;
287 struct phy_device *phydev;
288
289 /* PHY connection type from DT */
290 int phy_mode;
291
292 /* Current link status */
293 int speed;
294 int duplex;
295 int link;
296
297 /* Pause settings */
298 bool pause_aneg;
299 bool pause_rx;
300 bool pause_tx;
301
302 /* DMA base address of rx descriptors, see rx_descs above */
303 dma_addr_t rx_desc_dma;
304
305 /* DMA base address of tx descriptors, see tx_descs above */
306 dma_addr_t tx_desc_dma;
307
308 struct clk *clk;
309};
310
311struct nb8800_ops {
312 int (*init)(struct net_device *dev);
313 int (*reset)(struct net_device *dev);
314};
315
316#endif /* _NB8800_H_ */
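
To make the register layout above concrete, here is a self-contained userspace sketch of how the MDIO_CMD_* field macros compose one command word; the actual register write in the driver is omitted, and the PHY address, register and data values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MDIO_CMD_GO		BIT(31)
#define MDIO_CMD_WR		BIT(26)
#define MDIO_CMD_ADDR(x)	((uint32_t)(x) << 21)
#define MDIO_CMD_REG(x)		((uint32_t)(x) << 16)
#define MDIO_CMD_DATA(x)	((uint32_t)(x) << 0)

int main(void)
{
	/* write 0x1234 to register 2 of the PHY at address 1; a read
	 * would omit MDIO_CMD_WR and the DATA field
	 */
	uint32_t cmd = MDIO_CMD_GO | MDIO_CMD_WR |
		       MDIO_CMD_ADDR(1) | MDIO_CMD_REG(2) |
		       MDIO_CMD_DATA(0x1234);

	printf("MDIO command word: 0x%08x\n", (unsigned int)cmd);	/* 0x84221234 */
	return 0;
}
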
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f1d62d5dbaff..2e611dc5f162 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
10139 DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); 10139 DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
10140 return; 10140 return;
10141 } 10141 }
10142 bp->vxlan_dst_port--; 10142 bp->vxlan_dst_port_count--;
10143 if (bp->vxlan_dst_port) 10143 if (bp->vxlan_dst_port_count)
10144 return; 10144 return;
10145 10145
10146 if (netif_running(bp->dev)) { 10146 if (netif_running(bp->dev)) {
@@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13207 13207
13208 /* VF with OLD Hypervisor or old PF do not support filtering */ 13208 /* VF with OLD Hypervisor or old PF do not support filtering */
13209 if (IS_PF(bp)) { 13209 if (IS_PF(bp)) {
13210 if (CHIP_IS_E1x(bp)) 13210 if (chip_is_e1x)
13211 bp->accept_any_vlan = true; 13211 bp->accept_any_vlan = true;
13212 else 13212 else
13213 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13213 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
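
The first bnx2x hunk above is a counting fix: the old code decremented the UDP port number itself where a usage count was intended. A runnable sketch of the corrected pattern, with simplified names:

#include <stdio.h>

struct tunnel_state {
	unsigned short dst_port;	/* configured UDP port, must stay intact */
	int dst_port_count;		/* how many users added the port */
};

static void del_port(struct tunnel_state *s)
{
	if (!s->dst_port_count)
		return;			/* invalid delete, nothing was added */
	if (--s->dst_port_count)
		return;			/* port still in use by another user */
	s->dst_port = 0;		/* last user gone: tear down */
}

int main(void)
{
	struct tunnel_state s = { 4789, 2 };

	del_port(&s);	/* one user left, port kept */
	del_port(&s);	/* last user, port torn down */
	printf("port=%u count=%d\n", s.dst_port, s.dst_port_count);
	return 0;
}
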
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index db15c5ee09c5..bdf094fb6ef9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3625,6 +3625,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3625 pf->fw_fid = le16_to_cpu(resp->fid); 3625 pf->fw_fid = le16_to_cpu(resp->fid);
3626 pf->port_id = le16_to_cpu(resp->port_id); 3626 pf->port_id = le16_to_cpu(resp->port_id);
3627 memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); 3627 memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3628 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3628 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 3629 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3629 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 3630 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3630 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 3631 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -3648,8 +3649,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3648 3649
3649 vf->fw_fid = le16_to_cpu(resp->fid); 3650 vf->fw_fid = le16_to_cpu(resp->fid);
3650 memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); 3651 memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3651 if (!is_valid_ether_addr(vf->mac_addr)) 3652 if (is_valid_ether_addr(vf->mac_addr))
3652 random_ether_addr(vf->mac_addr); 3653 /* overwrite netdev dev_addr with admin VF MAC */
3654 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3655 else
3656 random_ether_addr(bp->dev->dev_addr);
3653 3657
3654 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 3658 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3655 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 3659 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3880,6 +3884,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
3880#endif 3884#endif
3881} 3885}
3882 3886
3887static int bnxt_cfg_rx_mode(struct bnxt *);
3888
3883static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 3889static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
3884{ 3890{
3885 int rc = 0; 3891 int rc = 0;
@@ -3946,11 +3952,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
3946 bp->vnic_info[0].rx_mask |= 3952 bp->vnic_info[0].rx_mask |=
3947 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 3953 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
3948 3954
3949 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 3955 rc = bnxt_cfg_rx_mode(bp);
3950 if (rc) { 3956 if (rc)
3951 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
3952 goto err_out; 3957 goto err_out;
3953 }
3954 3958
3955 rc = bnxt_hwrm_set_coal(bp); 3959 rc = bnxt_hwrm_set_coal(bp);
3956 if (rc) 3960 if (rc)
@@ -4865,7 +4869,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
4865 } 4869 }
4866} 4870}
4867 4871
4868static void bnxt_cfg_rx_mode(struct bnxt *bp) 4872static int bnxt_cfg_rx_mode(struct bnxt *bp)
4869{ 4873{
4870 struct net_device *dev = bp->dev; 4874 struct net_device *dev = bp->dev;
4871 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 4875 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -4914,6 +4918,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp)
4914 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 4918 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
4915 rc); 4919 rc);
4916 vnic->uc_filter_count = i; 4920 vnic->uc_filter_count = i;
4921 return rc;
4917 } 4922 }
4918 } 4923 }
4919 4924
@@ -4922,6 +4927,8 @@ skip_uc:
4922 if (rc) 4927 if (rc)
4923 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 4928 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
4924 rc); 4929 rc);
4930
4931 return rc;
4925} 4932}
4926 4933
4927static netdev_features_t bnxt_fix_features(struct net_device *dev, 4934static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -5212,13 +5219,27 @@ init_err:
5212static int bnxt_change_mac_addr(struct net_device *dev, void *p) 5219static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5213{ 5220{
5214 struct sockaddr *addr = p; 5221 struct sockaddr *addr = p;
5222 struct bnxt *bp = netdev_priv(dev);
5223 int rc = 0;
5215 5224
5216 if (!is_valid_ether_addr(addr->sa_data)) 5225 if (!is_valid_ether_addr(addr->sa_data))
5217 return -EADDRNOTAVAIL; 5226 return -EADDRNOTAVAIL;
5218 5227
5228#ifdef CONFIG_BNXT_SRIOV
5229 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5230 return -EADDRNOTAVAIL;
5231#endif
5232
5233 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5234 return 0;
5235
5219 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5236 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5237 if (netif_running(dev)) {
5238 bnxt_close_nic(bp, false, false);
5239 rc = bnxt_open_nic(bp, false, false);
5240 }
5220 5241
5221 return 0; 5242 return rc;
5222} 5243}
5223 5244
5224/* rtnl_lock held */ 5245/* rtnl_lock held */
@@ -5686,15 +5707,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5686 bnxt_set_tpa_flags(bp); 5707 bnxt_set_tpa_flags(bp);
5687 bnxt_set_ring_params(bp); 5708 bnxt_set_ring_params(bp);
5688 dflt_rings = netif_get_num_default_rss_queues(); 5709 dflt_rings = netif_get_num_default_rss_queues();
5689 if (BNXT_PF(bp)) { 5710 if (BNXT_PF(bp))
5690 memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
5691 bp->pf.max_irqs = max_irqs; 5711 bp->pf.max_irqs = max_irqs;
5692 } else {
5693#if defined(CONFIG_BNXT_SRIOV) 5712#if defined(CONFIG_BNXT_SRIOV)
5694 memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); 5713 else
5695 bp->vf.max_irqs = max_irqs; 5714 bp->vf.max_irqs = max_irqs;
5696#endif 5715#endif
5697 }
5698 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); 5716 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5699 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 5717 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
5700 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 5718 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f4cf68861069..7a9af2887d8e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp)
804 if (!is_valid_ether_addr(resp->perm_mac_address)) 804 if (!is_valid_ether_addr(resp->perm_mac_address))
805 goto update_vf_mac_exit; 805 goto update_vf_mac_exit;
806 806
807 if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) 807 if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
808 goto update_vf_mac_exit; 808 memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
809 809 /* overwrite netdev dev_addr with admin VF MAC */
810 memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
811 memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); 810 memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
812update_vf_mac_exit: 811update_vf_mac_exit:
813 mutex_unlock(&bp->hwrm_cmd_lock); 812 mutex_unlock(&bp->hwrm_cmd_lock);
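
Both bnxt hunks hinge on the semantics of is_valid_ether_addr() and ether_addr_equal(). A userspace mock of the validity check (the kernel versions live in etherdevice.h; this only illustrates their behavior):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* multicast: LSB of the first octet set; this also covers broadcast */
static bool is_multicast(const uint8_t *a) { return a[0] & 1; }

static bool is_zero(const uint8_t *a)
{
	static const uint8_t z[6];
	return !memcmp(a, z, 6);
}

/* a valid unicast MAC is neither multicast/broadcast nor all-zero */
static bool is_valid_ether_addr(const uint8_t *a)
{
	return !is_multicast(a) && !is_zero(a);
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("valid: %d\n", is_valid_ether_addr(mac));	/* 1 */
	return 0;
}
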
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 88c1e1a834f8..169059c92f80 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp)
1682 macb_set_hwaddr(bp); 1682 macb_set_hwaddr(bp);
1683 1683
1684 config = macb_mdc_clk_div(bp); 1684 config = macb_mdc_clk_div(bp);
1685 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1686 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1685 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 1687 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
1686 config |= MACB_BIT(PAE); /* PAuse Enable */ 1688 config |= MACB_BIT(PAE); /* PAuse Enable */
1687 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 1689 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev)
2416 /* Set MII management clock divider */ 2418 /* Set MII management clock divider */
2417 val = macb_mdc_clk_div(bp); 2419 val = macb_mdc_clk_div(bp);
2418 val |= macb_dbw(bp); 2420 val |= macb_dbw(bp);
2421 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2422 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2419 macb_writel(bp, NCFGR, val); 2423 macb_writel(bp, NCFGR, val);
2420 2424
2421 return 0; 2425 return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 6e1faea00ca8..d83b0db77821 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -215,12 +215,17 @@
215/* GEM specific NCFGR bitfields. */ 215/* GEM specific NCFGR bitfields. */
216#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ 216#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
217#define GEM_GBE_SIZE 1 217#define GEM_GBE_SIZE 1
218#define GEM_PCSSEL_OFFSET 11
219#define GEM_PCSSEL_SIZE 1
218#define GEM_CLK_OFFSET 18 /* MDC clock division */ 220#define GEM_CLK_OFFSET 18 /* MDC clock division */
219#define GEM_CLK_SIZE 3 221#define GEM_CLK_SIZE 3
220#define GEM_DBW_OFFSET 21 /* Data bus width */ 222#define GEM_DBW_OFFSET 21 /* Data bus width */
221#define GEM_DBW_SIZE 2 223#define GEM_DBW_SIZE 2
222#define GEM_RXCOEN_OFFSET 24 224#define GEM_RXCOEN_OFFSET 24
223#define GEM_RXCOEN_SIZE 1 225#define GEM_RXCOEN_SIZE 1
226#define GEM_SGMIIEN_OFFSET 27
227#define GEM_SGMIIEN_SIZE 1
228
224 229
225/* Constants for data bus width. */ 230/* Constants for data bus width. */
226#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ 231#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */
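
The OFFSET/SIZE pairs added above feed the token-pasting accessor macros that macb.h already defines; in self-contained form the mechanism looks roughly like this (macro shapes follow the macb.h convention), with GEM_BIT(SGMIIEN) expanding to 1 << 27:

#define GEM_PCSSEL_OFFSET	11
#define GEM_PCSSEL_SIZE		1
#define GEM_SGMIIEN_OFFSET	27
#define GEM_SGMIIEN_SIZE	1

/* same shape as the accessor macros in macb.h */
#define GEM_BIT(name)	(1 << GEM_##name##_OFFSET)
#define GEM_BF(name, value) \
	(((value) & ((1 << GEM_##name##_SIZE) - 1)) << GEM_##name##_OFFSET)

int main(void)
{
	/* the macb_init_hw() hunk: route the MAC through the SGMII PCS */
	unsigned int config = GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);

	return config == ((1u << 27) | (1u << 11)) ? 0 : 1;
}
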
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f683d97d7614..b89504405b72 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev)
560#endif 560#endif
561 561
562/* For PCI-E Advanced Error Recovery (AER) Interface */ 562/* For PCI-E Advanced Error Recovery (AER) Interface */
563static struct pci_error_handlers liquidio_err_handler = { 563static const struct pci_error_handlers liquidio_err_handler = {
564 .error_detected = liquidio_pcie_error_detected, 564 .error_detected = liquidio_pcie_error_detected,
565 .mmio_enabled = liquidio_pcie_mmio_enabled, 565 .mmio_enabled = liquidio_pcie_mmio_enabled,
566 .slot_reset = liquidio_pcie_slot_reset, 566 .slot_reset = liquidio_pcie_slot_reset,
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index d3950b20feb9..39ca6744a4e6 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -120,10 +120,9 @@
120 * Calculated for SCLK of 700Mhz 120 * Calculated for SCLK of 700Mhz
121 * value written should be a 1/16th of what is expected 121 * value written should be a 1/16th of what is expected
122 * 122 *
123 * 1 tick per 0.05usec = value of 2.2 123 * 1 tick per 0.025usec
124 * This 10% would be covered in CQ timer thresh value
125 */ 124 */
126#define NICPF_CLK_PER_INT_TICK 2 125#define NICPF_CLK_PER_INT_TICK 1
127 126
128/* Time to wait before we decide that a SQ is stuck. 127/* Time to wait before we decide that a SQ is stuck.
129 * 128 *
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index c561fdcb79a7..4b7fd63ae57c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -37,6 +37,7 @@ struct nicpf {
37#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) 37#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
38#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) 38#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
39 u8 vf_lmac_map[MAX_LMAC]; 39 u8 vf_lmac_map[MAX_LMAC];
40 u8 lmac_cnt;
40 struct delayed_work dwork; 41 struct delayed_work dwork;
41 struct workqueue_struct *check_link; 42 struct workqueue_struct *check_link;
42 u8 link[MAX_LMAC]; 43 u8 link[MAX_LMAC];
@@ -279,6 +280,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
279 u64 lmac_credit; 280 u64 lmac_credit;
280 281
281 nic->num_vf_en = 0; 282 nic->num_vf_en = 0;
283 nic->lmac_cnt = 0;
282 284
283 for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { 285 for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
284 if (!(bgx_map & (1 << bgx))) 286 if (!(bgx_map & (1 << bgx)))
@@ -288,6 +290,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
288 nic->vf_lmac_map[next_bgx_lmac++] = 290 nic->vf_lmac_map[next_bgx_lmac++] =
289 NIC_SET_VF_LMAC_MAP(bgx, lmac); 291 NIC_SET_VF_LMAC_MAP(bgx, lmac);
290 nic->num_vf_en += lmac_cnt; 292 nic->num_vf_en += lmac_cnt;
293 nic->lmac_cnt += lmac_cnt;
291 294
292 /* Program LMAC credits */ 295 /* Program LMAC credits */
293 lmac_credit = (1ull << 1); /* channel credit enable */ 296 lmac_credit = (1ull << 1); /* channel credit enable */
@@ -715,6 +718,13 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
715 case NIC_MBOX_MSG_CFG_DONE: 718 case NIC_MBOX_MSG_CFG_DONE:
716 /* Last message of VF config msg sequence */ 719 /* Last message of VF config msg sequence */
717 nic->vf_enabled[vf] = true; 720 nic->vf_enabled[vf] = true;
721 if (vf >= nic->lmac_cnt)
722 goto unlock;
723
724 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
725 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
726
727 bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
718 goto unlock; 728 goto unlock;
719 case NIC_MBOX_MSG_SHUTDOWN: 729 case NIC_MBOX_MSG_SHUTDOWN:
720 /* First msg in VF teardown sequence */ 730 /* First msg in VF teardown sequence */
@@ -722,6 +732,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
722 if (vf >= nic->num_vf_en) 732 if (vf >= nic->num_vf_en)
723 nic->sqs_used[vf - nic->num_vf_en] = false; 733 nic->sqs_used[vf - nic->num_vf_en] = false;
724 nic->pqs_vf[vf] = 0; 734 nic->pqs_vf[vf] = 0;
735
736 if (vf >= nic->lmac_cnt)
737 break;
738
739 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
740 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
741
742 bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
725 break; 743 break;
726 case NIC_MBOX_MSG_ALLOC_SQS: 744 case NIC_MBOX_MSG_ALLOC_SQS:
727 nic_alloc_sqs(nic, &mbx.sqs_alloc); 745 nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -940,7 +958,7 @@ static void nic_poll_for_link(struct work_struct *work)
940 958
941 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; 959 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
942 960
943 for (vf = 0; vf < nic->num_vf_en; vf++) { 961 for (vf = 0; vf < nic->lmac_cnt; vf++) {
944 /* Poll only if VF is UP */ 962 /* Poll only if VF is UP */
945 if (!nic->vf_enabled[vf]) 963 if (!nic->vf_enabled[vf])
946 continue; 964 continue;
@@ -1074,8 +1092,7 @@ static void nic_remove(struct pci_dev *pdev)
1074 1092
1075 if (nic->check_link) { 1093 if (nic->check_link) {
1076 /* Destroy work Queue */ 1094 /* Destroy work Queue */
1077 cancel_delayed_work(&nic->dwork); 1095 cancel_delayed_work_sync(&nic->dwork);
1078 flush_workqueue(nic->check_link);
1079 destroy_workqueue(nic->check_link); 1096 destroy_workqueue(nic->check_link);
1080 } 1097 }
1081 1098
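
The teardown change above swaps cancel_delayed_work() + flush_workqueue() for cancel_delayed_work_sync(), which also waits for an already-running callback. That matters because the link poller re-arms itself; a hedged kernel-style fragment of the shape involved (field names as in the driver, interval illustrative, not a standalone module):

/* self-rearming poller: without the _sync cancel it could requeue
 * itself between cancel_delayed_work() and destroy_workqueue()
 */
static void poll_for_link(struct work_struct *work)
{
	struct nicpf *nic = container_of(work, struct nicpf, dwork.work);

	/* ... read link state, notify VFs ... */

	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}

static void stop_poller(struct nicpf *nic)
{
	cancel_delayed_work_sync(&nic->dwork);	/* cancel and wait */
	destroy_workqueue(nic->check_link);
}
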
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index af54c10945c2..a12b2e38cf61 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev,
112 112
113 cmd->supported = 0; 113 cmd->supported = 0;
114 cmd->transceiver = XCVR_EXTERNAL; 114 cmd->transceiver = XCVR_EXTERNAL;
115
116 if (!nic->link_up) {
117 cmd->duplex = DUPLEX_UNKNOWN;
118 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
119 return 0;
120 }
121
115 if (nic->speed <= 1000) { 122 if (nic->speed <= 1000) {
116 cmd->port = PORT_MII; 123 cmd->port = PORT_MII;
117 cmd->autoneg = AUTONEG_ENABLE; 124 cmd->autoneg = AUTONEG_ENABLE;
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev,
125 return 0; 132 return 0;
126} 133}
127 134
135static u32 nicvf_get_link(struct net_device *netdev)
136{
137 struct nicvf *nic = netdev_priv(netdev);
138
139 return nic->link_up;
140}
141
128static void nicvf_get_drvinfo(struct net_device *netdev, 142static void nicvf_get_drvinfo(struct net_device *netdev,
129 struct ethtool_drvinfo *info) 143 struct ethtool_drvinfo *info)
130{ 144{
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev,
660 674
661static const struct ethtool_ops nicvf_ethtool_ops = { 675static const struct ethtool_ops nicvf_ethtool_ops = {
662 .get_settings = nicvf_get_settings, 676 .get_settings = nicvf_get_settings,
663 .get_link = ethtool_op_get_link, 677 .get_link = nicvf_get_link,
664 .get_drvinfo = nicvf_get_drvinfo, 678 .get_drvinfo = nicvf_get_drvinfo,
665 .get_msglevel = nicvf_get_msglevel, 679 .get_msglevel = nicvf_get_msglevel,
666 .set_msglevel = nicvf_set_msglevel, 680 .set_msglevel = nicvf_set_msglevel,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a9377727c11c..dde8dc720cd3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev)
1057 1057
1058 netif_carrier_off(netdev); 1058 netif_carrier_off(netdev);
1059 netif_tx_stop_all_queues(nic->netdev); 1059 netif_tx_stop_all_queues(nic->netdev);
1060 nic->link_up = false;
1060 1061
1061 /* Teardown secondary qsets first */ 1062 /* Teardown secondary qsets first */
1062 if (!nic->sqs_mode) { 1063 if (!nic->sqs_mode) {
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev)
1211 nic->drv_stats.txq_stop = 0; 1212 nic->drv_stats.txq_stop = 0;
1212 nic->drv_stats.txq_wake = 0; 1213 nic->drv_stats.txq_wake = 0;
1213 1214
1214 netif_carrier_on(netdev);
1215 netif_tx_start_all_queues(netdev);
1216
1217 return 0; 1215 return 0;
1218cleanup: 1216cleanup:
1219 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); 1217 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
@@ -1583,8 +1581,14 @@ err_disable_device:
1583static void nicvf_remove(struct pci_dev *pdev) 1581static void nicvf_remove(struct pci_dev *pdev)
1584{ 1582{
1585 struct net_device *netdev = pci_get_drvdata(pdev); 1583 struct net_device *netdev = pci_get_drvdata(pdev);
1586 struct nicvf *nic = netdev_priv(netdev); 1584 struct nicvf *nic;
1587 struct net_device *pnetdev = nic->pnicvf->netdev; 1585 struct net_device *pnetdev;
1586
1587 if (!netdev)
1588 return;
1589
1590 nic = netdev_priv(netdev);
1591 pnetdev = nic->pnicvf->netdev;
1588 1592
1589 /* Check if this Qset is assigned to different VF. 1593 /* Check if this Qset is assigned to different VF.
1590 * If yes, clean primary and all secondary Qsets. 1594 * If yes, clean primary and all secondary Qsets.
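
The nicvf_remove() hunk guards against a half-finished probe: if probe failed before pci_set_drvdata(), remove() sees NULL drvdata and must not dereference it. A hedged fragment of the resulting shape (not a standalone module):

static void guarded_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!netdev)
		return;		/* probe never completed */

	/* only from here on is netdev_priv(netdev) safe to use */
}
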
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index e404ea837727..206b6a71a545 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
592 /* Set threshold value for interrupt generation */ 592 /* Set threshold value for interrupt generation */
593 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); 593 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
594 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, 594 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
595 qidx, nic->cq_coalesce_usecs); 595 qidx, CMP_QUEUE_TIMER_THRESH);
596} 596}
597 597
598/* Configures transmit queue */ 598/* Configures transmit queue */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index fb4957d09914..033e8306e91c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -76,7 +76,7 @@
76#define CMP_QSIZE CMP_QUEUE_SIZE2 76#define CMP_QSIZE CMP_QUEUE_SIZE2
77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
78#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
80 80
81#define RBDR_SIZE RBDR_SIZE0 81#define RBDR_SIZE RBDR_SIZE0
82#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) 82#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
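
For reference, the two threshold hunks are mutually consistent: at the new rate of 1 tick per 0.025 usec (the nic.h hunk above), 80 ticks x 0.025 usec = 2 usec, matching the new "~2usec" note, while the old 220 ticks at roughly 0.05 usec per tick came to about 11 usec, matching the old "10usec" comment.
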
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 180aa9fabf48..9df26c2263bc 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
186} 186}
187EXPORT_SYMBOL(bgx_set_lmac_mac); 187EXPORT_SYMBOL(bgx_set_lmac_mac);
188 188
189void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
190{
191 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
192 u64 cfg;
193
194 if (!bgx)
195 return;
196
197 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
198 if (enable)
199 cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
200 else
201 cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
202 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
203}
204EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
205
189static void bgx_sgmii_change_link_state(struct lmac *lmac) 206static void bgx_sgmii_change_link_state(struct lmac *lmac)
190{ 207{
191 struct bgx *bgx = lmac->bgx; 208 struct bgx *bgx = lmac->bgx;
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work)
612 lmac->last_duplex = 1; 629 lmac->last_duplex = 1;
613 } else { 630 } else {
614 lmac->link_up = 0; 631 lmac->link_up = 0;
632 lmac->last_speed = SPEED_UNKNOWN;
633 lmac->last_duplex = DUPLEX_UNKNOWN;
615 } 634 }
616 635
617 if (lmac->last_link != lmac->link_up) { 636 if (lmac->last_link != lmac->link_up) {
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
654 } 673 }
655 674
656 /* Enable lmac */ 675 /* Enable lmac */
657 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, 676 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
658 CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
659 677
660 /* Restore default cfg, in case low-level firmware changed it */ 678 /* Restore default cfg, in case low-level firmware changed it */
661 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); 679 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
695 lmac = &bgx->lmac[lmacid]; 713 lmac = &bgx->lmac[lmacid];
696 if (lmac->check_link) { 714 if (lmac->check_link) {
697 /* Destroy work queue */ 715 /* Destroy work queue */
698 cancel_delayed_work(&lmac->dwork); 716 cancel_delayed_work_sync(&lmac->dwork);
699 flush_workqueue(lmac->check_link);
700 destroy_workqueue(lmac->check_link); 717 destroy_workqueue(lmac->check_link);
701 } 718 }
702 719
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1009 struct bgx *bgx = NULL; 1026 struct bgx *bgx = NULL;
1010 u8 lmac; 1027 u8 lmac;
1011 1028
1029 /* Load octeon mdio driver */
1030 octeon_mdiobus_force_mod_depencency();
1031
1012 bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); 1032 bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
1013 if (!bgx) 1033 if (!bgx)
1014 return -ENOMEM; 1034 return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 07b7ec66c60d..149e179363a1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -182,6 +182,8 @@ enum MCAST_MODE {
182#define BCAST_ACCEPT 1 182#define BCAST_ACCEPT 1
183#define CAM_ACCEPT 1 183#define CAM_ACCEPT 1
184 184
185void octeon_mdiobus_force_mod_depencency(void);
186void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
185void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); 187void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
186unsigned bgx_get_map(int node); 188unsigned bgx_get_map(int node);
187int bgx_get_lmac_count(int node, int bgx); 189int bgx_get_lmac_count(int node, int bgx);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index ed41559bae77..b553409e04ad 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800;
98#elif defined(__mips__) 98#elif defined(__mips__)
99static int csr0 = 0x00200000 | 0x4000; 99static int csr0 = 0x00200000 | 0x4000;
100#else 100#else
101#warning Processor architecture undefined! 101static int csr0;
102static int csr0 = 0x00A00000 | 0x4800;
103#endif 102#endif
104 103
105/* Operational parameters that usually are not changed. */ 104/* Operational parameters that usually are not changed. */
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void)
1982 pr_info("%s", version); 1981 pr_info("%s", version);
1983#endif 1982#endif
1984 1983
1984 if (!csr0) {
1985 pr_warn("tulip: unknown CPU architecture, using default csr0\n");
1986 /* default to 8 longword cache line alignment */
1987 csr0 = 0x00A00000 | 0x4800;
1988 }
1989
1985 /* copy module parms into globals */ 1990 /* copy module parms into globals */
1986 tulip_rx_copybreak = rx_copybreak; 1991 tulip_rx_copybreak = rx_copybreak;
1987 tulip_max_interrupt_work = max_interrupt_work; 1992 tulip_max_interrupt_work = max_interrupt_work;
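
The tulip hunk trades a build-breaking #warning for a run-time fallback: csr0 is left at zero when no architecture branch matches, and init patches in a conservative default. A compressed, runnable sketch of the pattern (the real #ifdef chain is much longer; the architecture test here is a stand-in):

#include <stdio.h>

#if defined(__i386__) || defined(__x86_64__)	/* stand-in for the chain */
static int csr0 = 0x01A00000 | 0x4800;
#else
static int csr0;				/* 0 means "not chosen" */
#endif

int main(void)
{
	if (!csr0) {
		fprintf(stderr, "unknown CPU architecture, using default csr0\n");
		csr0 = 0x00A00000 | 0x4800;	/* 8-longword cache alignment */
	}
	printf("csr0 = 0x%08x\n", csr0);
	return 0;
}
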
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 9beb3d34d4ba..3c0e4d5c5fef 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev)
907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) 907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
908 i |= 0x4800; 908 i |= 0x4800;
909#else 909#else
910#warning Processor architecture undefined 910 dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
911 i |= 0x4800; 911 i |= 0x4800;
912#endif 912#endif
913 iowrite32(i, ioaddr + PCIBusCfg); 913 iowrite32(i, ioaddr + PCIBusCfg);
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index f6e858d0b9d4..ebdc83247bb6 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -17,15 +17,16 @@ config NET_VENDOR_DLINK
17if NET_VENDOR_DLINK 17if NET_VENDOR_DLINK
18 18
19config DL2K 19config DL2K
20 tristate "DL2000/TC902x-based Gigabit Ethernet support" 20 tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
21 depends on PCI 21 depends on PCI
22 select CRC32 22 select CRC32
23 ---help--- 23 ---help---
24 This driver supports DL2000/TC902x-based Gigabit ethernet cards, 24 This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards,
25 which includes 25 which includes
26 D-Link DGE-550T Gigabit Ethernet Adapter. 26 D-Link DGE-550T Gigabit Ethernet Adapter.
27 D-Link DL2000-based Gigabit Ethernet Adapter. 27 D-Link DL2000-based Gigabit Ethernet Adapter.
28 Sundance/Tamarack TC902x Gigabit Ethernet Adapter. 28 Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
29 ICPlus IP1000A-based cards
29 30
30 To compile this driver as a module, choose M here: the 31 To compile this driver as a module, choose M here: the
31 module will be called dl2k. 32 module will be called dl2k.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cf0a5fcdaaaf..ccca4799c27b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
253 if (err) 253 if (err)
254 goto err_out_unmap_rx; 254 goto err_out_unmap_rx;
255 255
256 if (np->chip_id == CHIP_IP1000A &&
257 (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
258 /* PHY magic taken from ipg driver, undocumented registers */
259 mii_write(dev, np->phy_addr, 31, 0x0001);
260 mii_write(dev, np->phy_addr, 27, 0x01e0);
261 mii_write(dev, np->phy_addr, 31, 0x0002);
262 mii_write(dev, np->phy_addr, 27, 0xeb8e);
263 mii_write(dev, np->phy_addr, 31, 0x0000);
264 mii_write(dev, np->phy_addr, 30, 0x005e);
265 /* advertise 1000BASE-T half & full duplex, prefer MASTER */
266 mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
267 }
268
256 /* Fiber device? */ 269 /* Fiber device? */
257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; 270 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
258 np->link_status = 0; 271 np->link_status = 0;
@@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev)
361 for (i = 0; i < 6; i++) 374 for (i = 0; i < 6; i++)
362 dev->dev_addr[i] = psrom->mac_addr[i]; 375 dev->dev_addr[i] = psrom->mac_addr[i];
363 376
377 if (np->chip_id == CHIP_IP1000A) {
378 np->led_mode = psrom->led_mode;
379 return 0;
380 }
381
364 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { 382 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
365 return 0; 383 return 0;
366 } 384 }
@@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev)
406 return 0; 424 return 0;
407} 425}
408 426
427static void rio_set_led_mode(struct net_device *dev)
428{
429 struct netdev_private *np = netdev_priv(dev);
430 void __iomem *ioaddr = np->ioaddr;
431 u32 mode;
432
433 if (np->chip_id != CHIP_IP1000A)
434 return;
435
436 mode = dr32(ASICCtrl);
437 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
438
439 if (np->led_mode & 0x01)
440 mode |= IPG_AC_LED_MODE;
441 if (np->led_mode & 0x02)
442 mode |= IPG_AC_LED_MODE_BIT_1;
443 if (np->led_mode & 0x08)
444 mode |= IPG_AC_LED_SPEED;
445
446 dw32(ASICCtrl, mode);
447}
448
409static int 449static int
410rio_open (struct net_device *dev) 450rio_open (struct net_device *dev)
411{ 451{
@@ -424,6 +464,8 @@ rio_open (struct net_device *dev)
424 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); 464 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
425 mdelay(10); 465 mdelay(10);
426 466
467 rio_set_led_mode(dev);
468
427 /* DebugCtrl bits 4, 5 and 9 must be set */ 469 /* DebugCtrl bits 4, 5 and 9 must be set */
428 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); 470 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
429 471
@@ -433,9 +475,13 @@ rio_open (struct net_device *dev)
433 475
434 alloc_list (dev); 476 alloc_list (dev);
435 477
436 /* Get station address */ 478 /* Set station address */
437 for (i = 0; i < 6; i++) 479 /* 16 or 32-bit access is required by the TC9020 datasheet but 8-bit works
438 dw8(StationAddr0 + i, dev->dev_addr[i]); 480 * too. However, it doesn't work on IP1000A so we use 16-bit access.
481 */
482 for (i = 0; i < 3; i++)
483 dw16(StationAddr0 + 2 * i,
484 cpu_to_le16(((u16 *)dev->dev_addr)[i]));
439 485
440 set_multicast (dev); 486 set_multicast (dev);
441 if (np->coalesce) { 487 if (np->coalesce) {
@@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status)
780 break; 826 break;
781 mdelay (1); 827 mdelay (1);
782 } 828 }
829 rio_set_led_mode(dev);
783 rio_free_tx (dev, 1); 830 rio_free_tx (dev, 1);
784 /* Reset TFDListPtr */ 831 /* Reset TFDListPtr */
785 dw32(TFDListPtr0, np->tx_ring_dma + 832 dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status)
799 break; 846 break;
800 mdelay (1); 847 mdelay (1);
801 } 848 }
849 rio_set_led_mode(dev);
802 /* Let TxStartThresh stay default value */ 850 /* Let TxStartThresh stay default value */
803 } 851 }
804 /* Maximum Collisions */ 852 /* Maximum Collisions */
@@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status)
965 dev->name, int_status); 1013 dev->name, int_status);
966 dw16(ASICCtrl + 2, GlobalReset | HostReset); 1014 dw16(ASICCtrl + 2, GlobalReset | HostReset);
967 mdelay (500); 1015 mdelay (500);
1016 rio_set_led_mode(dev);
968 } 1017 }
969} 1018}
970 1019
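
The station-address hunk packs the six MAC bytes into three 16-bit stores because, per the comment in the patch, 8-bit writes do not work on the IP1000A. A self-contained sketch of the byte pairing (the cpu_to_le16() in the patch makes the same low-byte-first layout explicit on big-endian hosts):

#include <stdint.h>
#include <stdio.h>

static void pack_station_addr(uint16_t out[3], const uint8_t mac[6])
{
	int i;

	/* each 16-bit register holds two MAC bytes, low byte first */
	for (i = 0; i < 3; i++)
		out[i] = (uint16_t)(mac[2 * i] | (mac[2 * i + 1] << 8));
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t regs[3];

	pack_station_addr(regs, mac);
	printf("%04x %04x %04x\n", regs[0], regs[1], regs[2]);	/* 1100 3322 5544 */
	return 0;
}
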
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 23c07b007069..8f4f61262d5c 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits {
211 ResetBusy = 0x0400, 211 ResetBusy = 0x0400,
212}; 212};
213 213
214#define IPG_AC_LED_MODE BIT(14)
215#define IPG_AC_LED_SPEED BIT(27)
216#define IPG_AC_LED_MODE_BIT_1 BIT(29)
217
214/* Transmit Frame Control bits */ 218/* Transmit Frame Control bits */
215enum TFC_bits { 219enum TFC_bits {
216 DwordAlign = 0x00000000, 220 DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@ typedef struct t_SROM {
332 u16 asic_ctrl; /* 0x02 */ 336 u16 asic_ctrl; /* 0x02 */
333 u16 sub_vendor_id; /* 0x04 */ 337 u16 sub_vendor_id; /* 0x04 */
334 u16 sub_system_id; /* 0x06 */ 338 u16 sub_system_id; /* 0x06 */
335 u16 reserved1[12]; /* 0x08-0x1f */ 339 u16 pci_base_1; /* 0x08 (IP1000A only) */
340 u16 pci_base_2; /* 0x0a (IP1000A only) */
341 u16 led_mode; /* 0x0c (IP1000A only) */
342 u16 reserved1[9]; /* 0x0e-0x1f */
336 u8 mac_addr[6]; /* 0x20-0x25 */ 343 u8 mac_addr[6]; /* 0x20-0x25 */
337 u8 reserved2[10]; /* 0x26-0x2f */ 344 u8 reserved2[10]; /* 0x26-0x2f */
338 u8 sib[204]; /* 0x30-0xfb */ 345 u8 sib[204]; /* 0x30-0xfb */
@@ -397,6 +404,7 @@ struct netdev_private {
397 u16 advertising; /* NWay media advertisement */ 404 u16 advertising; /* NWay media advertisement */
398 u16 negotiate; /* Negotiated media */ 405 u16 negotiate; /* Negotiated media */
399 int phy_addr; /* PHY addresses. */ 406 int phy_addr; /* PHY addresses. */
407 u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
400}; 408};
401 409
402/* The station address location in the EEPROM. */ 410/* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@ struct netdev_private {
407 class_mask of the class are honored during the comparison. 415 class_mask of the class are honored during the comparison.
408 driver_data Data private to the driver. 416 driver_data Data private to the driver.
409*/ 417*/
418#define CHIP_IP1000A 1
410 419
411static const struct pci_device_id rio_pci_tbl[] = { 420static const struct pci_device_id rio_pci_tbl[] = {
412 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 421 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
413 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 422 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
423 { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
424 { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A },
425 { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A },
426 { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A },
414 { } 427 { }
415}; 428};
416MODULE_DEVICE_TABLE (pci, rio_pci_tbl); 429MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f4cb8e425853..734f655c99c1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1062static int be_set_rss_hash_opts(struct be_adapter *adapter, 1062static int be_set_rss_hash_opts(struct be_adapter *adapter,
1063 struct ethtool_rxnfc *cmd) 1063 struct ethtool_rxnfc *cmd)
1064{ 1064{
1065 struct be_rx_obj *rxo; 1065 int status;
1066 int status = 0, i, j;
1067 u8 rsstable[128];
1068 u32 rss_flags = adapter->rss_info.rss_flags; 1066 u32 rss_flags = adapter->rss_info.rss_flags;
1069 1067
1070 if (cmd->data != L3_RSS_FLAGS && 1068 if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
1113 } 1111 }
1114 1112
1115 if (rss_flags == adapter->rss_info.rss_flags) 1113 if (rss_flags == adapter->rss_info.rss_flags)
1116 return status; 1114 return 0;
1117
1118 if (be_multi_rxq(adapter)) {
1119 for (j = 0; j < 128; j += adapter->num_rss_qs) {
1120 for_all_rss_queues(adapter, rxo, i) {
1121 if ((j + i) >= 128)
1122 break;
1123 rsstable[j + i] = rxo->rss_id;
1124 }
1125 }
1126 }
1127 1115
1128 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, 1116 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
1129 rss_flags, 128, adapter->rss_info.rss_hkey); 1117 rss_flags, RSS_INDIR_TABLE_LEN,
1118 adapter->rss_info.rss_hkey);
1130 if (!status) 1119 if (!status)
1131 adapter->rss_info.rss_flags = rss_flags; 1120 adapter->rss_info.rss_flags = rss_flags;
1132 1121
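
The deleted be2net block built the 128-entry RSS indirection table by striding the RSS queues round-robin; after this patch the driver reuses the table saved in rss_info instead of rebuilding it. The removed fill, reduced to a runnable sketch (queue count illustrative; the index i stands in for the real rxo->rss_id):

#include <stdio.h>

#define RSS_INDIR_TABLE_LEN	128

int main(void)
{
	unsigned char rsstable[RSS_INDIR_TABLE_LEN];
	int num_rss_qs = 4;	/* illustrative queue count */
	int i, j;

	/* slot j + i is served by queue i, i.e. slot n maps to n % num_rss_qs */
	for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && j + i < RSS_INDIR_TABLE_LEN; i++)
			rsstable[j + i] = (unsigned char)i;

	printf("slot 0 -> q%u, slot 5 -> q%u\n", rsstable[0], rsstable[5]);
	return 0;
}
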
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb48a977f8da..b6ad02909d6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3518 3518
3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); 3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, 3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3521 128, rss_key); 3521 RSS_INDIR_TABLE_LEN, rss_key);
3522 if (rc) { 3522 if (rc) {
3523 rss->rss_flags = RSS_ENABLE_NONE; 3523 rss->rss_flags = RSS_ENABLE_NONE;
3524 return rc; 3524 return rc;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ff76d4e9dc1b..bee32a9d9876 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE
7 default y 7 default y
8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ 8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
9 M523x || M527x || M5272 || M528x || M520x || M532x || \ 9 M523x || M527x || M5272 || M528x || M520x || M532x || \
10 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) 10 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
11 ARCH_LAYERSCAPE
11 ---help--- 12 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y. 13 If you have a network (Ethernet) card belonging to this class, say Y.
13 14
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3e6b9b437497..7cf898455e60 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np,
647 if (model && strcasecmp(model, "FEC")) { 647 if (model && strcasecmp(model, "FEC")) {
648 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); 648 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
649 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); 649 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
650 if (gfar_irq(grp, TX)->irq == NO_IRQ || 650 if (!gfar_irq(grp, TX)->irq ||
651 gfar_irq(grp, RX)->irq == NO_IRQ || 651 !gfar_irq(grp, RX)->irq ||
652 gfar_irq(grp, ER)->irq == NO_IRQ) 652 !gfar_irq(grp, ER)->irq)
653 return -EINVAL; 653 return -EINVAL;
654 } 654 }
655 655
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 664d0c261269..b40fba929d65 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
467 467
468 etsects->irq = platform_get_irq(dev, 0); 468 etsects->irq = platform_get_irq(dev, 0);
469 469
470 if (etsects->irq == NO_IRQ) { 470 if (etsects->irq < 0) {
471 pr_err("irq not in device tree\n"); 471 pr_err("irq not in device tree\n");
472 goto no_node; 472 goto no_node;
473 } 473 }
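
The two freescale IRQ hunks reflect two different error conventions: irq_of_parse_and_map() returns 0 when no mapping exists (hence the !irq tests in gianfar.c), while platform_get_irq() returns a negative errno (hence the < 0 test in gianfar_ptp.c). Comparing either against the legacy NO_IRQ constant was never portable, since NO_IRQ differed between architectures. A hedged fragment showing both checks (not a standalone module; function name is hypothetical):

int check_irqs(struct device_node *np, struct platform_device *pdev)
{
	int irq;

	irq = irq_of_parse_and_map(np, 0);	/* 0 signals failure */
	if (!irq)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);	/* negative errno on failure */
	if (irq < 0)
		return irq;

	return 0;
}
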
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9d2e26..000000000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
1#
2# IC Plus device configuration
3#
4
5config IP1000
6 tristate "IP1000 Gigabit Ethernet support"
7 depends on PCI
8 select MII
9 ---help---
10 This driver supports IP1000 gigabit Ethernet cards.
11
12 To compile this driver as a module, choose M here: the module
13 will be called ipg. This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1f36aa..000000000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the IC Plus device drivers
3#
4
5obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af83f070..000000000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
1/*
2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
3 *
4 * Copyright (C) 2003, 2007 IC Plus Corp
5 *
6 * Original Author:
7 *
8 * Craig Rich
9 * Sundance Technology, Inc.
10 * www.sundanceti.com
11 * craig_rich@sundanceti.com
12 *
13 * Current Maintainer:
14 *
15 * Sorbica Shieh.
16 * http://www.icplus.com.tw
17 * sorbica@icplus.com.tw
18 *
19 * Jesse Huang
20 * http://www.icplus.com.tw
21 * jesse@icplus.com.tw
22 */
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/crc32.h>
27#include <linux/ethtool.h>
28#include <linux/interrupt.h>
29#include <linux/gfp.h>
30#include <linux/mii.h>
31#include <linux/mutex.h>
32
33#include <asm/div64.h>
34
35#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
36#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
37#define IPG_RESET_MASK \
38 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
39 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
40 IPG_AC_AUTO_INIT)
41
42#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
43#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
44#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
45
46#define ipg_r32(reg) ioread32(ioaddr + (reg))
47#define ipg_r16(reg) ioread16(ioaddr + (reg))
48#define ipg_r8(reg) ioread8(ioaddr + (reg))
49
50enum {
51 netdev_io_size = 128
52};
53
54#include "ipg.h"
55#define DRV_NAME "ipg"
56
57MODULE_AUTHOR("IC Plus Corp. 2003");
58MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
59MODULE_LICENSE("GPL");
60
61/*
62 * Defaults
63 */
64#define IPG_MAX_RXFRAME_SIZE 0x0600
65#define IPG_RXFRAG_SIZE 0x0600
66#define IPG_RXSUPPORT_SIZE 0x0600
67#define IPG_IS_JUMBO false
68
69/*
70 * Variable record -- index by leading revision/length
71 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
72 */
73static const unsigned short DefaultPhyParam[] = {
74 /* 11/12/03 IP1000A v1-3 rev=0x40 */
75 /*--------------------------------------------------------------------------
76 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
77 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
78 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
79 --------------------------------------------------------------------------*/
80 /* 12/17/03 IP1000A v1-4 rev=0x40 */
81 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
82 0x0000,
83 30, 0x005e, 9, 0x0700,
84 /* 01/09/04 IP1000A v1-5 rev=0x41 */
85 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
86 0x0000,
87 30, 0x005e, 9, 0x0700,
88 0x0000
89};
90
91static const char * const ipg_brand_name[] = {
92 "IC PLUS IP1000 1000/100/10 based NIC",
93 "Sundance Technology ST2021 based NIC",
94 "Tamarack Microelectronics TC9020/9021 based NIC",
95 "D-Link NIC IP1000A"
96};
97
98static const struct pci_device_id ipg_pci_tbl[] = {
99 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
100 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
101 { PCI_VDEVICE(DLINK, 0x9021), 2 },
102 { PCI_VDEVICE(DLINK, 0x4020), 3 },
103 { 0, }
104};
105
106MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
107
108static inline void __iomem *ipg_ioaddr(struct net_device *dev)
109{
110 struct ipg_nic_private *sp = netdev_priv(dev);
111 return sp->ioaddr;
112}
113
114#ifdef IPG_DEBUG
115static void ipg_dump_rfdlist(struct net_device *dev)
116{
117 struct ipg_nic_private *sp = netdev_priv(dev);
118 void __iomem *ioaddr = sp->ioaddr;
119 unsigned int i;
120 u32 offset;
121
122 IPG_DEBUG_MSG("_dump_rfdlist\n");
123
124 netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125 netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
126 netdev_info(dev, "RFDList start address = %016lx\n",
127 (unsigned long)sp->rxd_map);
128 netdev_info(dev, "RFDListPtr register = %08x%08x\n",
129 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
130
131 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
132 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
133 netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
134 i, offset, (unsigned long)sp->rxd[i].next_desc);
135 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
136 netdev_info(dev, "%02x %04x RFS = %016lx\n",
137 i, offset, (unsigned long)sp->rxd[i].rfs);
138 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
139 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
140 i, offset, (unsigned long)sp->rxd[i].frag_info);
141 }
142}
143
144static void ipg_dump_tfdlist(struct net_device *dev)
145{
146 struct ipg_nic_private *sp = netdev_priv(dev);
147 void __iomem *ioaddr = sp->ioaddr;
148 unsigned int i;
149 u32 offset;
150
151 IPG_DEBUG_MSG("_dump_tfdlist\n");
152
153 netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154 netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
155 netdev_info(dev, "TFDList start address = %016lx\n",
156 (unsigned long) sp->txd_map);
157 netdev_info(dev, "TFDListPtr register = %08x%08x\n",
158 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
159
160 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
161 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
162 netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
163 i, offset, (unsigned long)sp->txd[i].next_desc);
164
165 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
166 netdev_info(dev, "%02x %04x TFC = %016lx\n",
167 i, offset, (unsigned long) sp->txd[i].tfc);
168 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
169 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
170 i, offset, (unsigned long) sp->txd[i].frag_info);
171 }
172}
173#endif
174
175static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
176{
177 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
178 ndelay(IPG_PC_PHYCTRLWAIT_NS);
179}
180
181static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
182{
183 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
184 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
185}
186
187static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
188{
189 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
190
191 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
192}
193
194static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
195{
196 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
197 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
198}
199
200static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
201{
202 u16 bit_data;
203
204 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
205
206 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
207
208 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
209
210 return bit_data;
211}
212
213/*
214 * Read a register from the Physical Layer device located
215 * on the IPG NIC, using the IPG PHYCTRL register.
216 */
217static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
218{
219 void __iomem *ioaddr = ipg_ioaddr(dev);
220 /*
221 * The GMII management frame structure for a read is as follows:
222 *
223 * |Preamble|st|op|phyad|regad|ta| data |idle|
224 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
225 *
226 * <32 1s> = 32 consecutive logic 1 values
227 * A = bit of Physical Layer device address (MSB first)
228 * R = bit of register address (MSB first)
229 * z = High impedance state
230 * D = bit of read data (MSB first)
231 *
232 * Transmission order is 'Preamble' field first, bits transmitted
233 * left to right (first to last).
234 */
235 struct {
236 u32 field;
237 unsigned int len;
238 } p[] = {
239 { GMII_PREAMBLE, 32 }, /* Preamble */
240 { GMII_ST, 2 }, /* ST */
241 { GMII_READ, 2 }, /* OP */
242 { phy_id, 5 }, /* PHYAD */
243 { phy_reg, 5 }, /* REGAD */
244 { 0x0000, 2 }, /* TA */
245 { 0x0000, 16 }, /* DATA */
246 { 0x0000, 1 } /* IDLE */
247 };
248 unsigned int i, j;
249 u8 polarity, data;
250
251 polarity = ipg_r8(PHY_CTRL);
252 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
253
254 /* Create the Preamble, ST, OP, PHYAD, and REGAD fields. */
255 for (j = 0; j < 5; j++) {
256 for (i = 0; i < p[j].len; i++) {
257 /* For each variable length field, the MSB must be
258 * transmitted first. Rotate through the field bits,
259 * starting with the MSB, and move each bit into
260 * the 1st (2^1) bit position (this is the bit position
261 * corresponding to the MgmtData bit of the PhyCtrl
262 * register for the IPG).
263 *
264 * Example: ST = 01;
265 *
266 * First write a '0' to bit 1 of the PhyCtrl
267 * register, then write a '1' to bit 1 of the
268 * PhyCtrl register.
269 *
270 * To do this, right shift the MSB of ST by the value:
271 * [field length - 1 - #ST bits already written]
272 * then left shift this result by 1.
273 */
274 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
275 data &= IPG_PC_MGMTDATA;
276 data |= polarity | IPG_PC_MGMTDIR;
277
278 ipg_drive_phy_ctl_low_high(ioaddr, data);
279 }
280 }
281
282 send_three_state(ioaddr, polarity);
283
284 read_phy_bit(ioaddr, polarity);
285
286 /*
287 * For a read cycle, the bits for the next two fields (TA and
288 * DATA) are driven by the PHY (the IPG reads these bits).
289 */
290 for (i = 0; i < p[6].len; i++) {
291 p[6].field |=
292 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
293 }
294
295 send_three_state(ioaddr, polarity);
296 send_three_state(ioaddr, polarity);
297 send_three_state(ioaddr, polarity);
298 send_end(ioaddr, polarity);
299
300 /* Return the value of the DATA field. */
301 return p[6].field;
302}
303
304/*
305 * Write to a register from the Physical Layer device located
306 * on the IPG NIC, using the IPG PHYCTRL register.
307 */
308static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
309{
310 void __iomem *ioaddr = ipg_ioaddr(dev);
311 /*
312 * The GMII management frame structure for a write is as follows:
313 *
314 * |Preamble|st|op|phyad|regad|ta| data |idle|
315 * |< 32 1s>|01|01|AAAAA|RRRRR|10|DDDDDDDDDDDDDDDD|z |
316 *
317 * <32 1s> = 32 consecutive logic 1 values
318 * A = bit of Physical Layer device address (MSB first)
319 * R = bit of register address (MSB first)
320 * z = High impedance state
321 * D = bit of write data (MSB first)
322 *
323 * Transmission order is 'Preamble' field first, bits transmitted
324 * left to right (first to last).
325 */
326 struct {
327 u32 field;
328 unsigned int len;
329 } p[] = {
330 { GMII_PREAMBLE, 32 }, /* Preamble */
331 { GMII_ST, 2 }, /* ST */
332 { GMII_WRITE, 2 }, /* OP */
333 { phy_id, 5 }, /* PHYAD */
334 { phy_reg, 5 }, /* REGAD */
335 { 0x0002, 2 }, /* TA */
336 { val & 0xffff, 16 }, /* DATA */
337 { 0x0000, 1 } /* IDLE */
338 };
339 unsigned int i, j;
340 u8 polarity, data;
341
342 polarity = ipg_r8(PHY_CTRL);
343 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
344
345 /* Create and send the Preamble, ST, OP, PHYAD, REGAD, TA and DATA fields. */
346 for (j = 0; j < 7; j++) {
347 for (i = 0; i < p[j].len; i++) {
348 /* For each variable length field, the MSB must be
349 * transmitted first. Rotate through the field bits,
350 * starting with the MSB, and move each bit into
351 * the 1st (2^1) bit position (this is the bit position
352 * corresponding to the MgmtData bit of the PhyCtrl
353 * register for the IPG).
354 *
355 * Example: ST = 01;
356 *
357 * First write a '0' to bit 1 of the PhyCtrl
358 * register, then write a '1' to bit 1 of the
359 * PhyCtrl register.
360 *
361 * To do this, right shift the MSB of ST by the value:
362 * [field length - 1 - #ST bits already written]
363 * then left shift this result by 1.
364 */
365 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
366 data &= IPG_PC_MGMTDATA;
367 data |= polarity | IPG_PC_MGMTDIR;
368
369 ipg_drive_phy_ctl_low_high(ioaddr, data);
370 }
371 }
372
373 /* The last cycle is a tri-state, so read from the PHY. */
374 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
375 ipg_r8(PHY_CTRL);
376 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
377}
378
379static void ipg_set_led_mode(struct net_device *dev)
380{
381 struct ipg_nic_private *sp = netdev_priv(dev);
382 void __iomem *ioaddr = sp->ioaddr;
383 u32 mode;
384
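/* sp->led_mode is loaded from EEPROM word 6 in ipg_hw_init():
 * bits 0-1 select the LED mode, bit 3 the speed indication and
 * bits 4-6 the PHYSet value (see ipg_set_phy_set()).
 */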
385 mode = ipg_r32(ASIC_CTRL);
386 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
387
388 if ((sp->led_mode & 0x03) > 1)
389 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
390
391 if ((sp->led_mode & 0x01) == 1)
392 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
393
394 if ((sp->led_mode & 0x08) == 8)
395 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
396
397 ipg_w32(mode, ASIC_CTRL);
398}
399
400static void ipg_set_phy_set(struct net_device *dev)
401{
402 struct ipg_nic_private *sp = netdev_priv(dev);
403 void __iomem *ioaddr = sp->ioaddr;
404 int physet;
405
406 physet = ipg_r8(PHY_SET);
407 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
408 physet |= ((sp->led_mode & 0x70) >> 4);
409 ipg_w8(physet, PHY_SET);
410}
411
412static int ipg_reset(struct net_device *dev, u32 resetflags)
413{
414 /* Assert functional resets via the IPG AsicCtrl
415 * register as specified by the 'resetflags' input
416 * parameter.
417 */
418 void __iomem *ioaddr = ipg_ioaddr(dev);
419 unsigned int timeout_count = 0;
420
421 IPG_DEBUG_MSG("_reset\n");
422
423 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
424
425 /* Delay added to account for problem with 10Mbps reset. */
426 mdelay(IPG_AC_RESETWAIT);
427
428 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
429 mdelay(IPG_AC_RESETWAIT);
430 if (++timeout_count > IPG_AC_RESET_TIMEOUT)
431 return -ETIME;
432 }
433 /* Set LED Mode in Asic Control */
434 ipg_set_led_mode(dev);
435
436 /* Set PHYSet Register Value */
437 ipg_set_phy_set(dev);
438 return 0;
439}
440
441/* Find the GMII PHY address. */
442static int ipg_find_phyaddr(struct net_device *dev)
443{
444 unsigned int phyaddr, i;
445
446 for (i = 0; i < 32; i++) {
447 u32 status;
448
449 /* Search for the correct PHY address among 32 possible. */
450 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
451
452 /* 10/22/03: Grace changed the verification register from
453 GMII_PHY_STATUS to GMII_PHY_ID1.
454 */
455
456 status = mdio_read(dev, phyaddr, MII_BMSR);
457
458 if ((status != 0xFFFF) && (status != 0))
459 return phyaddr;
460 }
461
462 return 0x1f;
463}
464
465/*
466 * Configure IPG based on result of IEEE 802.3 PHY
467 * auto-negotiation.
468 */
469static int ipg_config_autoneg(struct net_device *dev)
470{
471 struct ipg_nic_private *sp = netdev_priv(dev);
472 void __iomem *ioaddr = sp->ioaddr;
473 unsigned int txflowcontrol;
474 unsigned int rxflowcontrol;
475 unsigned int fullduplex;
476 u32 mac_ctrl_val;
477 u32 asicctrl;
478 u8 phyctrl;
479 const char *speed;
480 const char *duplex;
481 const char *tx_desc;
482 const char *rx_desc;
483
484 IPG_DEBUG_MSG("_config_autoneg\n");
485
486 asicctrl = ipg_r32(ASIC_CTRL);
487 phyctrl = ipg_r8(PHY_CTRL);
488 mac_ctrl_val = ipg_r32(MAC_CTRL);
489
490 /* Set flags for use in resolving auto-negotiation, assuming
491 * non-1000Mbps, half duplex, no flow control.
492 */
493 fullduplex = 0;
494 txflowcontrol = 0;
495 rxflowcontrol = 0;
496
497 /* To accommodate a problem in 10Mbps operation,
498 * set a global flag if the PHY is running in 10Mbps mode.
499 */
500 sp->tenmbpsmode = 0;
501
502 /* Determine actual speed of operation. */
503 switch (phyctrl & IPG_PC_LINK_SPEED) {
504 case IPG_PC_LINK_SPEED_10MBPS:
505 speed = "10Mbps";
506 sp->tenmbpsmode = 1;
507 break;
508 case IPG_PC_LINK_SPEED_100MBPS:
509 speed = "100Mbps";
510 break;
511 case IPG_PC_LINK_SPEED_1000MBPS:
512 speed = "1000Mbps";
513 break;
514 default:
515 speed = "undefined!";
516 return 0;
517 }
518
519 netdev_info(dev, "Link speed = %s\n", speed);
520 if (sp->tenmbpsmode == 1)
521 netdev_info(dev, "10Mbps operational mode enabled\n");
522
523 if (phyctrl & IPG_PC_DUPLEX_STATUS) {
524 fullduplex = 1;
525 txflowcontrol = 1;
526 rxflowcontrol = 1;
527 }
528
529 /* Configure full duplex, and flow control. */
530 if (fullduplex == 1) {
531
532 /* Configure IPG for full duplex operation. */
533
534 duplex = "full";
535
536 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
537
538 if (txflowcontrol == 1) {
539 tx_desc = "";
540 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
541 } else {
542 tx_desc = "no ";
543 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
544 }
545
546 if (rxflowcontrol == 1) {
547 rx_desc = "";
548 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
549 } else {
550 rx_desc = "no ";
551 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
552 }
553 } else {
554 duplex = "half";
555 tx_desc = "no ";
556 rx_desc = "no ";
557 mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
558 ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
559 ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
560 }
561
562 netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
563 duplex, tx_desc, rx_desc);
564 ipg_w32(mac_ctrl_val, MAC_CTRL);
565
566 return 0;
567}
568
569/* Determine and configure multicast operation and set
570 * receive mode for IPG.
571 */
572static void ipg_nic_set_multicast_list(struct net_device *dev)
573{
574 void __iomem *ioaddr = ipg_ioaddr(dev);
575 struct netdev_hw_addr *ha;
576 unsigned int hashindex;
577 u32 hashtable[2];
578 u8 receivemode;
579
580 IPG_DEBUG_MSG("_nic_set_multicast_list\n");
581
582 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
583
584 if (dev->flags & IFF_PROMISC) {
585 /* NIC to be configured in promiscuous mode. */
586 receivemode = IPG_RM_RECEIVEALLFRAMES;
587 } else if ((dev->flags & IFF_ALLMULTI) ||
588 ((dev->flags & IFF_MULTICAST) &&
589 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
590 /* NIC to be configured to receive all multicast
591 * frames. */
592 receivemode |= IPG_RM_RECEIVEMULTICAST;
593 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
594 /* NIC to be configured to receive selected
595 * multicast addresses. */
596 receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
597 }
598
599 /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
600 * The IPG applies a cyclic-redundancy-check (the same CRC
601 * used to calculate the frame data FCS) to the destination
602 * address of all incoming multicast frames whose destination
603 * address has the multicast bit set. The least significant
604 * 6 bits of the CRC result are used as an addressing index
605 * into the hash table. If the value of the bit addressed by
606 * this index is a 1, the frame is passed to the host system.
607 */
608
609 /* Clear hashtable. */
610 hashtable[0] = 0x00000000;
611 hashtable[1] = 0x00000000;
612
613 /* Cycle through all multicast addresses to filter. */
614 netdev_for_each_mc_addr(ha, dev) {
615 /* Calculate CRC result for each multicast address. */
616 hashindex = crc32_le(0xffffffff, ha->addr,
617 ETH_ALEN);
618
619 /* Use only the least significant 6 bits. */
620 hashindex = hashindex & 0x3F;
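/* The 6-bit index addresses one of the 64 hash bits: bits 0-4
 * select a bit within a 32-bit word and bit 5 selects
 * hashtable[0] or hashtable[1] (assuming a little-endian
 * bitmap layout).
 */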
621
622 /* Within "hashtable", set bit number "hashindex"
623 * to a logic 1.
624 */
625 set_bit(hashindex, (void *)hashtable);
626 }
627
628 /* Write the value of the hashtable to the four 16-bit
629 * HASHTABLE IPG registers, as two 32-bit writes.
630 */
631 ipg_w32(hashtable[0], HASHTABLE_0);
632 ipg_w32(hashtable[1], HASHTABLE_1);
633
634 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
635
636 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
637}
638
639static int ipg_io_config(struct net_device *dev)
640{
641 struct ipg_nic_private *sp = netdev_priv(dev);
642 void __iomem *ioaddr = ipg_ioaddr(dev);
643 u32 origmacctrl;
644 u32 restoremacctrl;
645
646 IPG_DEBUG_MSG("_io_config\n");
647
648 origmacctrl = ipg_r32(MAC_CTRL);
649
650 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
651
652 /* Based on compilation option, determine if FCS is to be
653 * stripped on receive frames by IPG.
654 */
655 if (!IPG_STRIP_FCS_ON_RX)
656 restoremacctrl |= IPG_MC_RCV_FCS;
657
658 /* Determine if transmitter and/or receiver are
659 * enabled so we may restore MACCTRL correctly.
660 */
661 if (origmacctrl & IPG_MC_TX_ENABLED)
662 restoremacctrl |= IPG_MC_TX_ENABLE;
663
664 if (origmacctrl & IPG_MC_RX_ENABLED)
665 restoremacctrl |= IPG_MC_RX_ENABLE;
666
667 /* Transmitter and receiver must be disabled before setting
668 * IFSSelect.
669 */
670 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
671 IPG_MC_RSVD_MASK, MAC_CTRL);
672
673 /* Now that transmitter and receiver are disabled, write
674 * to IFSSelect.
675 */
676 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
677
678 /* Set RECEIVEMODE register. */
679 ipg_nic_set_multicast_list(dev);
680
681 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
682
683 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
684 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
685 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
686 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
687 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
688 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
689 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
690 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
691 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
692 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
693 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
694 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
695
696 /* IPG multi-frag frame bug workaround.
697 * Per silicon revision B3 errata.
698 */
699 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
700
701 /* IPG TX poll now bug workaround.
702 * Per silicon revision B3 errata.
703 */
704 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
705
706 /* IPG RX poll now bug workaround.
707 * Per silicon revision B3 errata.
708 */
709 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
710
711 /* Now restore MACCTRL to original setting. */
712 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
713
714 /* Disable unused RMON statistics. */
715 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
716
717 /* Disable unused MIB statistics. */
718 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
719 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
720 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
721 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
722 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
723 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
724
725 return 0;
726}
727
728/*
729 * Create a receive buffer within system memory and update
730 * NIC private structure appropriately.
731 */
732static int ipg_get_rxbuff(struct net_device *dev, int entry)
733{
734 struct ipg_nic_private *sp = netdev_priv(dev);
735 struct ipg_rx *rxfd = sp->rxd + entry;
736 struct sk_buff *skb;
737 u64 rxfragsize;
738
739 IPG_DEBUG_MSG("_get_rxbuff\n");
740
741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) {
743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM;
745 }
746
747 /* Save the address of the sk_buff structure. */
748 sp->rx_buff[entry] = skb;
749
750 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
751 sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752
753 /* Set the RFD fragment length. */
754 rxfragsize = sp->rxfrag_size;
755 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
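/* The fragment length occupies bits 48-63 of frag_info; the low
 * bits hold the DMA address of the buffer.
 */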
756
757 return 0;
758}
759
760static int init_rfdlist(struct net_device *dev)
761{
762 struct ipg_nic_private *sp = netdev_priv(dev);
763 void __iomem *ioaddr = sp->ioaddr;
764 unsigned int i;
765
766 IPG_DEBUG_MSG("_init_rfdlist\n");
767
768 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
769 struct ipg_rx *rxfd = sp->rxd + i;
770
771 if (sp->rx_buff[i]) {
772 pci_unmap_single(sp->pdev,
773 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
774 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
775 dev_kfree_skb_irq(sp->rx_buff[i]);
776 sp->rx_buff[i] = NULL;
777 }
778
779 /* Clear out the RFS field. */
780 rxfd->rfs = 0x0000000000000000;
781
782 if (ipg_get_rxbuff(dev, i) < 0) {
783 /*
784 * A receive buffer was not ready, break the
785 * RFD list here.
786 */
787 IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
788
789 /* Just in case we cannot allocate a single RFD.
790 * Should not occur.
791 */
792 if (i == 0) {
793 netdev_err(dev, "No memory available for RFD list\n");
794 return -ENOMEM;
795 }
796 }
797
798 rxfd->next_desc = cpu_to_le64(sp->rxd_map +
799 sizeof(struct ipg_rx)*(i + 1));
800 }
801 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
802
803 sp->rx_current = 0;
804 sp->rx_dirty = 0;
805
806 /* Write the location of the RFDList to the IPG. */
807 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
808 ipg_w32(0x00000000, RFD_LIST_PTR_1);
809
810 return 0;
811}
812
813static void init_tfdlist(struct net_device *dev)
814{
815 struct ipg_nic_private *sp = netdev_priv(dev);
816 void __iomem *ioaddr = sp->ioaddr;
817 unsigned int i;
818
819 IPG_DEBUG_MSG("_init_tfdlist\n");
820
821 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
822 struct ipg_tx *txfd = sp->txd + i;
823
824 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
825
826 if (sp->tx_buff[i]) {
827 dev_kfree_skb_irq(sp->tx_buff[i]);
828 sp->tx_buff[i] = NULL;
829 }
830
831 txfd->next_desc = cpu_to_le64(sp->txd_map +
832 sizeof(struct ipg_tx)*(i + 1));
833 }
834 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
835
836 sp->tx_current = 0;
837 sp->tx_dirty = 0;
838
839 /* Write the location of the TFDList to the IPG. */
840 IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
841 (u32) sp->txd_map);
842 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
843 ipg_w32(0x00000000, TFD_LIST_PTR_1);
844
845 sp->reset_current_tfd = 1;
846}
847
848/*
849 * Free all transmit buffers which have already been transferred
850 * via DMA to the IPG.
851 */
852static void ipg_nic_txfree(struct net_device *dev)
853{
854 struct ipg_nic_private *sp = netdev_priv(dev);
855 unsigned int released, pending, dirty;
856
857 IPG_DEBUG_MSG("_nic_txfree\n");
858
859 pending = sp->tx_current - sp->tx_dirty;
860 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
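/* tx_current and tx_dirty are free-running counters; taking them
 * modulo IPG_TFDLIST_LENGTH maps them onto the TFD ring.
 */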
861
862 for (released = 0; released < pending; released++) {
863 struct sk_buff *skb = sp->tx_buff[dirty];
864 struct ipg_tx *txfd = sp->txd + dirty;
865
866 IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
867
868 /* Look at each TFD's TFC field beginning
869 * at the last freed TFD up to the current TFD.
870 * If the TFDDone bit is set, free the associated
871 * buffer.
872 */
873 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
874 break;
875
876 /* Free the transmit buffer. */
877 if (skb) {
878 pci_unmap_single(sp->pdev,
879 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
880 skb->len, PCI_DMA_TODEVICE);
881
882 dev_kfree_skb_irq(skb);
883
884 sp->tx_buff[dirty] = NULL;
885 }
886 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
887 }
888
889 sp->tx_dirty += released;
890
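/* Wake the queue if it was stopped and the TFD ring is no longer
 * completely full.
 */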
891 if (netif_queue_stopped(dev) &&
892 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
893 netif_wake_queue(dev);
894 }
895}
896
897static void ipg_tx_timeout(struct net_device *dev)
898{
899 struct ipg_nic_private *sp = netdev_priv(dev);
900 void __iomem *ioaddr = sp->ioaddr;
901
902 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
903 IPG_AC_FIFO);
904
905 spin_lock_irq(&sp->lock);
906
907 /* Re-configure after DMA reset. */
908 if (ipg_io_config(dev) < 0)
909 netdev_info(dev, "Error during re-configuration\n");
910
911 init_tfdlist(dev);
912
913 spin_unlock_irq(&sp->lock);
914
915 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
916 MAC_CTRL);
917}
918
919/*
920 * For TxComplete interrupts, free all transmit
921 * buffers which have already been transferred via DMA
922 * to the IPG.
923 */
924static void ipg_nic_txcleanup(struct net_device *dev)
925{
926 struct ipg_nic_private *sp = netdev_priv(dev);
927 void __iomem *ioaddr = sp->ioaddr;
928 unsigned int i;
929
930 IPG_DEBUG_MSG("_nic_txcleanup\n");
931
932 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
933 /* Reading the TXSTATUS register clears the
934 * TX_COMPLETE interrupt.
935 */
936 u32 txstatusdword = ipg_r32(TX_STATUS);
937
938 IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
939
940 /* Check for Transmit errors. Error bits only valid if
941 * TX_COMPLETE bit in the TXSTATUS register is a 1.
942 */
943 if (!(txstatusdword & IPG_TS_TX_COMPLETE))
944 break;
945
946 /* If in 10Mbps mode, indicate transmit is ready. */
947 if (sp->tenmbpsmode) {
948 netif_wake_queue(dev);
949 }
950
951 /* Transmit error, increment stat counters. */
952 if (txstatusdword & IPG_TS_TX_ERROR) {
953 IPG_DEBUG_MSG("Transmit error\n");
954 sp->stats.tx_errors++;
955 }
956
957 /* Late collision, re-enable transmitter. */
958 if (txstatusdword & IPG_TS_LATE_COLLISION) {
959 IPG_DEBUG_MSG("Late collision on transmit\n");
960 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
961 IPG_MC_RSVD_MASK, MAC_CTRL);
962 }
963
964 /* Maximum collisions, re-enable transmitter. */
965 if (txstatusdword & IPG_TS_TX_MAX_COLL) {
966 IPG_DEBUG_MSG("Maximum collisions on transmit\n");
967 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
968 IPG_MC_RSVD_MASK, MAC_CTRL);
969 }
970
971 /* Transmit underrun, reset and re-enable
972 * transmitter.
973 */
974 if (txstatusdword & IPG_TS_TX_UNDERRUN) {
975 IPG_DEBUG_MSG("Transmitter underrun\n");
976 sp->stats.tx_fifo_errors++;
977 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
978 IPG_AC_NETWORK | IPG_AC_FIFO);
979
980 /* Re-configure after DMA reset. */
981 if (ipg_io_config(dev) < 0) {
982 netdev_info(dev, "Error during re-configuration\n");
983 }
984 init_tfdlist(dev);
985
986 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
987 IPG_MC_RSVD_MASK, MAC_CTRL);
988 }
989 }
990
991 ipg_nic_txfree(dev);
992}
993
994/* Provides statistical information about the IPG NIC. */
995static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
996{
997 struct ipg_nic_private *sp = netdev_priv(dev);
998 void __iomem *ioaddr = sp->ioaddr;
999 u16 temp1;
1000 u16 temp2;
1001
1002 IPG_DEBUG_MSG("_nic_get_stats\n");
1003
1004 /* Check to see if the NIC has been initialized via nic_open,
1005 * before trying to read statistic registers.
1006 */
1007 if (!netif_running(dev))
1008 return &sp->stats;
1009
1010 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1011 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1012 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1013 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1014 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1015 sp->stats.rx_errors += temp1;
1016 sp->stats.rx_missed_errors += temp1;
1017 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1018 ipg_r32(IPG_LATECOLLISIONS);
1019 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1020 sp->stats.collisions += temp1;
1021 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1022 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1023 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1024 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1025
1026 /* detailed tx_errors */
1027 sp->stats.tx_carrier_errors += temp2;
1028
1029 /* detailed rx_errors */
1030 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1031 ipg_r16(IPG_FRAMETOOLONGERRORS);
1032 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1033
1034 /* Unutilized IPG statistic registers. */
1035 ipg_r32(IPG_MCSTFRAMESRCVDOK);
1036
1037 return &sp->stats;
1038}
1039
1040/* Restore used receive buffers. */
1041static int ipg_nic_rxrestore(struct net_device *dev)
1042{
1043 struct ipg_nic_private *sp = netdev_priv(dev);
1044 const unsigned int curr = sp->rx_current;
1045 unsigned int dirty = sp->rx_dirty;
1046
1047 IPG_DEBUG_MSG("_nic_rxrestore\n");
1048
1049 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1050 unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1051
1052 /* rx_copybreak may leave holes here and there. */
1053 if (sp->rx_buff[entry])
1054 continue;
1055
1056 /* Generate a new receive buffer to replace the
1057 * current buffer (which will be released by the
1058 * Linux system).
1059 */
1060 if (ipg_get_rxbuff(dev, entry) < 0) {
1061 IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
1062
1063 break;
1064 }
1065
1066 /* Reset the RFS field. */
1067 sp->rxd[entry].rfs = 0x0000000000000000;
1068 }
1069 sp->rx_dirty = dirty;
1070
1071 return 0;
1072}
1073
1074 /* Use jumboindex and jumbosize to track jumbo frame status.
1075 * Initial status is jumboindex = -1 and jumbosize = 0.
1076 * 1. jumboindex = -1 and jumbosize = 0: the previous jumbo frame has completed.
1077 * 2. jumboindex != -1 and jumbosize != 0: a jumbo frame is within the size limit and still being received.
1078 * 3. jumboindex = -1 and jumbosize != 0: the jumbo frame exceeded the size limit, the data received
1079 * so far has been discarded and the remainder must be discarded as well.
1080 */
1081enum {
1082 NORMAL_PACKET,
1083 ERROR_PACKET
1084};
1085
1086enum {
1087 FRAME_NO_START_NO_END = 0,
1088 FRAME_WITH_START = 1,
1089 FRAME_WITH_END = 10,
1090 FRAME_WITH_START_WITH_END = 11
1091};
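/* The values are chosen so that ipg_nic_rx_check_frame_type() can
 * simply add FRAME_WITH_START (1) and FRAME_WITH_END (10) to produce
 * one of the four cases above.
 */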
1092
1093static void ipg_nic_rx_free_skb(struct net_device *dev)
1094{
1095 struct ipg_nic_private *sp = netdev_priv(dev);
1096 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1097
1098 if (sp->rx_buff[entry]) {
1099 struct ipg_rx *rxfd = sp->rxd + entry;
1100
1101 pci_unmap_single(sp->pdev,
1102 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1103 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 dev_kfree_skb_irq(sp->rx_buff[entry]);
1105 sp->rx_buff[entry] = NULL;
1106 }
1107}
1108
1109static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1110{
1111 struct ipg_nic_private *sp = netdev_priv(dev);
1112 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1113 int type = FRAME_NO_START_NO_END;
1114
1115 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1116 type += FRAME_WITH_START;
1117 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1118 type += FRAME_WITH_END;
1119 return type;
1120}
1121
1122static int ipg_nic_rx_check_error(struct net_device *dev)
1123{
1124 struct ipg_nic_private *sp = netdev_priv(dev);
1125 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1126 struct ipg_rx *rxfd = sp->rxd + entry;
1127
1128 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1129 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1130 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1131 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1132 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1133 (unsigned long) rxfd->rfs);
1134
1135 /* Increment general receive error statistic. */
1136 sp->stats.rx_errors++;
1137
1138 /* Increment detailed receive error statistics. */
1139 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1140 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1141
1142 sp->stats.rx_fifo_errors++;
1143 }
1144
1145 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1146 IPG_DEBUG_MSG("RX runt occurred\n");
1147 sp->stats.rx_length_errors++;
1148 }
1149
1150 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1151 * error count handled by an IPG statistic register.
1152 */
1153
1154 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1155 IPG_DEBUG_MSG("RX alignment error occurred\n");
1156 sp->stats.rx_frame_errors++;
1157 }
1158
1159 /* Do nothing for IPG_RFS_RXFCSERROR, error count
1160 * handled by an IPG statistic register.
1161 */
1162
1163 /* Free the memory associated with the RX
1164 * buffer since it is erroneous and we will
1165 * not pass it to higher layer processes.
1166 */
1167 if (sp->rx_buff[entry]) {
1168 pci_unmap_single(sp->pdev,
1169 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1170 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1171
1172 dev_kfree_skb_irq(sp->rx_buff[entry]);
1173 sp->rx_buff[entry] = NULL;
1174 }
1175 return ERROR_PACKET;
1176 }
1177 return NORMAL_PACKET;
1178}
1179
1180static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1181 struct ipg_nic_private *sp,
1182 struct ipg_rx *rxfd, unsigned entry)
1183{
1184 struct ipg_jumbo *jumbo = &sp->jumbo;
1185 struct sk_buff *skb;
1186 int framelen;
1187
1188 if (jumbo->found_start) {
1189 dev_kfree_skb_irq(jumbo->skb);
1190 jumbo->found_start = 0;
1191 jumbo->current_size = 0;
1192 jumbo->skb = NULL;
1193 }
1194
1195 /* ipg_nic_rx_check_error() returns ERROR_PACKET (1) on error, NORMAL_PACKET (0) otherwise. */
1196 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1197 return;
1198
1199 skb = sp->rx_buff[entry];
1200 if (!skb)
1201 return;
1202
1203 /* accept this frame and send to upper layer */
1204 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1205 if (framelen > sp->rxfrag_size)
1206 framelen = sp->rxfrag_size;
1207
1208 skb_put(skb, framelen);
1209 skb->protocol = eth_type_trans(skb, dev);
1210 skb_checksum_none_assert(skb);
1211 netif_rx(skb);
1212 sp->rx_buff[entry] = NULL;
1213}
1214
1215static void ipg_nic_rx_with_start(struct net_device *dev,
1216 struct ipg_nic_private *sp,
1217 struct ipg_rx *rxfd, unsigned entry)
1218{
1219 struct ipg_jumbo *jumbo = &sp->jumbo;
1220 struct pci_dev *pdev = sp->pdev;
1221 struct sk_buff *skb;
1222
1223 /* ipg_nic_rx_check_error() returns ERROR_PACKET (1) on error, NORMAL_PACKET (0) otherwise. */
1224 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1225 return;
1226
1227 /* accept this frame and send to upper layer */
1228 skb = sp->rx_buff[entry];
1229 if (!skb)
1230 return;
1231
1232 if (jumbo->found_start)
1233 dev_kfree_skb_irq(jumbo->skb);
1234
1235 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1236 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1237
1238 skb_put(skb, sp->rxfrag_size);
1239
1240 jumbo->found_start = 1;
1241 jumbo->current_size = sp->rxfrag_size;
1242 jumbo->skb = skb;
1243
1244 sp->rx_buff[entry] = NULL;
1245}
1246
1247static void ipg_nic_rx_with_end(struct net_device *dev,
1248 struct ipg_nic_private *sp,
1249 struct ipg_rx *rxfd, unsigned entry)
1250{
1251 struct ipg_jumbo *jumbo = &sp->jumbo;
1252
1253 /* ipg_nic_rx_check_error() returns ERROR_PACKET (1) on error, NORMAL_PACKET (0) otherwise. */
1254 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1255 struct sk_buff *skb = sp->rx_buff[entry];
1256
1257 if (!skb)
1258 return;
1259
1260 if (jumbo->found_start) {
1261 int framelen, endframelen;
1262
1263 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1264
1265 endframelen = framelen - jumbo->current_size;
1266 if (framelen > sp->rxsupport_size)
1267 dev_kfree_skb_irq(jumbo->skb);
1268 else {
1269 memcpy(skb_put(jumbo->skb, endframelen),
1270 skb->data, endframelen);
1271
1272 jumbo->skb->protocol =
1273 eth_type_trans(jumbo->skb, dev);
1274
1275 skb_checksum_none_assert(jumbo->skb);
1276 netif_rx(jumbo->skb);
1277 }
1278 }
1279
1280 jumbo->found_start = 0;
1281 jumbo->current_size = 0;
1282 jumbo->skb = NULL;
1283
1284 ipg_nic_rx_free_skb(dev);
1285 } else {
1286 dev_kfree_skb_irq(jumbo->skb);
1287 jumbo->found_start = 0;
1288 jumbo->current_size = 0;
1289 jumbo->skb = NULL;
1290 }
1291}
1292
1293static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1294 struct ipg_nic_private *sp,
1295 struct ipg_rx *rxfd, unsigned entry)
1296{
1297 struct ipg_jumbo *jumbo = &sp->jumbo;
1298
1299 /* ipg_nic_rx_check_error() returns ERROR_PACKET (1) on error, NORMAL_PACKET (0) otherwise. */
1300 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1301 struct sk_buff *skb = sp->rx_buff[entry];
1302
1303 if (skb) {
1304 if (jumbo->found_start) {
1305 jumbo->current_size += sp->rxfrag_size;
1306 if (jumbo->current_size <= sp->rxsupport_size) {
1307 memcpy(skb_put(jumbo->skb,
1308 sp->rxfrag_size),
1309 skb->data, sp->rxfrag_size);
1310 }
1311 }
1312 ipg_nic_rx_free_skb(dev);
1313 }
1314 } else {
1315 dev_kfree_skb_irq(jumbo->skb);
1316 jumbo->found_start = 0;
1317 jumbo->current_size = 0;
1318 jumbo->skb = NULL;
1319 }
1320}
1321
1322static int ipg_nic_rx_jumbo(struct net_device *dev)
1323{
1324 struct ipg_nic_private *sp = netdev_priv(dev);
1325 unsigned int curr = sp->rx_current;
1326 void __iomem *ioaddr = sp->ioaddr;
1327 unsigned int i;
1328
1329 IPG_DEBUG_MSG("_nic_rx_jumbo\n");
1330
1331 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1332 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1333 struct ipg_rx *rxfd = sp->rxd + entry;
1334
1335 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1336 break;
1337
1338 switch (ipg_nic_rx_check_frame_type(dev)) {
1339 case FRAME_WITH_START_WITH_END:
1340 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1341 break;
1342 case FRAME_WITH_START:
1343 ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1344 break;
1345 case FRAME_WITH_END:
1346 ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1347 break;
1348 case FRAME_NO_START_NO_END:
1349 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1350 break;
1351 }
1352 }
1353
1354 sp->rx_current = curr;
1355
1356 if (i == IPG_MAXRFDPROCESS_COUNT) {
1357 /* There are more RFDs to process, however the
1358 * allocated amount of RFD processing time has
1359 * expired. Assert Interrupt Requested to make
1360 * sure we come back to process the remaining RFDs.
1361 */
1362 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1363 }
1364
1365 ipg_nic_rxrestore(dev);
1366
1367 return 0;
1368}
1369
1370static int ipg_nic_rx(struct net_device *dev)
1371{
1372 /* Transfer received Ethernet frames to higher network layers. */
1373 struct ipg_nic_private *sp = netdev_priv(dev);
1374 unsigned int curr = sp->rx_current;
1375 void __iomem *ioaddr = sp->ioaddr;
1376 struct ipg_rx *rxfd;
1377 unsigned int i;
1378
1379 IPG_DEBUG_MSG("_nic_rx\n");
1380
1381#define __RFS_MASK \
1382 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
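/* A completed single-fragment frame has RFDDone, FrameStart and
 * FrameEnd all set in its RFS field.
 */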
1383
1384 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1385 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1386 struct sk_buff *skb = sp->rx_buff[entry];
1387 unsigned int framelen;
1388
1389 rxfd = sp->rxd + entry;
1390
1391 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1392 break;
1393
1394 /* Get received frame length. */
1395 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1396
1397 /* Check for jumbo frame arrival with too small
1398 * RXFRAG_SIZE.
1399 */
1400 if (framelen > sp->rxfrag_size) {
1401 IPG_DEBUG_MSG
1402 ("RFS FrameLen > allocated fragment size\n");
1403
1404 framelen = sp->rxfrag_size;
1405 }
1406
1407 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1408 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1409 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1410 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1411
1412 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1413 (unsigned long int) rxfd->rfs);
1414
1415 /* Increment general receive error statistic. */
1416 sp->stats.rx_errors++;
1417
1418 /* Increment detailed receive error statistics. */
1419 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1420 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1421 sp->stats.rx_fifo_errors++;
1422 }
1423
1424 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1425 IPG_DEBUG_MSG("RX runt occurred\n");
1426 sp->stats.rx_length_errors++;
1427 }
1428
1429 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1430 * the error count is handled by an IPG
1431 * statistic register.
1432 */
1433
1434 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1435 IPG_DEBUG_MSG("RX alignment error occurred\n");
1436 sp->stats.rx_frame_errors++;
1437 }
1438
1439 /* Do nothing for IPG_RFS_RXFCSERROR,
1440 * the error count is handled by an IPG
1441 * statistic register.
1442 */
1443
1444 /* Free the memory associated with the RX
1445 * buffer since it is erroneous and we will
1446 * not pass it to higher layer processes.
1447 */
1448 if (skb) {
1449 __le64 info = rxfd->frag_info;
1450
1451 pci_unmap_single(sp->pdev,
1452 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1453 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1454
1455 dev_kfree_skb_irq(skb);
1456 }
1457 } else {
1458
1459 /* Adjust the new buffer length to accommodate the size
1460 * of the received frame.
1461 */
1462 skb_put(skb, framelen);
1463
1464 /* Set the buffer's protocol field to Ethernet. */
1465 skb->protocol = eth_type_trans(skb, dev);
1466
1467 /* The IPG encountered an error with (or
1468 * there were no) IP/TCP/UDP checksums.
1469 * This may or may not indicate an invalid
1470 * IP/TCP/UDP frame was received. Let the
1471 * upper layer decide.
1472 */
1473 skb_checksum_none_assert(skb);
1474
1475 /* Hand off frame for higher layer processing.
1476 * The function netif_rx() releases the sk_buff
1477 * when processing completes.
1478 */
1479 netif_rx(skb);
1480 }
1481
1482 /* Assure RX buffer is not reused by IPG. */
1483 sp->rx_buff[entry] = NULL;
1484 }
1485
1486 /*
1487 * If there are more RFDs to process and the allocated amount of RFD
1488 * processing time has expired, assert Interrupt Requested to make
1489 * sure we come back to process the remaining RFDs.
1490 */
1491 if (i == IPG_MAXRFDPROCESS_COUNT)
1492 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1493
1494#ifdef IPG_DEBUG
1495 /* Check if the RFD list contained no receive frame data. */
1496 if (!i)
1497 sp->EmptyRFDListCount++;
1498#endif
1499 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1500 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1501 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1502 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1503
1504 rxfd = sp->rxd + entry;
1505
1506 IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
1507
1508 /* An unexpected event; additional code is needed to handle
1509 * it properly. For the time being, just disregard the
1510 * frame.
1511 */
1512
1513 /* Free the memory associated with the RX
1514 * buffer since it is erroneous and we will
1515 * not pass it to higher layer processes.
1516 */
1517 if (sp->rx_buff[entry]) {
1518 pci_unmap_single(sp->pdev,
1519 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1520 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1521 dev_kfree_skb_irq(sp->rx_buff[entry]);
1522 }
1523
1524 /* Assure RX buffer is not reused by IPG. */
1525 sp->rx_buff[entry] = NULL;
1526 }
1527
1528 sp->rx_current = curr;
1529
1530 /* Check to see if there are a minimum number of used
1531 * RFDs before restoring any (should improve performance.)
1532 */
1533 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1534 ipg_nic_rxrestore(dev);
1535
1536 return 0;
1537}
1538
1539static void ipg_reset_after_host_error(struct work_struct *work)
1540{
1541 struct ipg_nic_private *sp =
1542 container_of(work, struct ipg_nic_private, task.work);
1543 struct net_device *dev = sp->dev;
1544
1545 /*
1546 * Acknowledge HostError interrupt by resetting
1547 * IPG DMA and HOST.
1548 */
1549 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1550
1551 init_rfdlist(dev);
1552 init_tfdlist(dev);
1553
1554 if (ipg_io_config(dev) < 0) {
1555 netdev_info(dev, "Cannot recover from PCI error\n");
1556 schedule_delayed_work(&sp->task, HZ);
1557 }
1558}
1559
1560static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1561{
1562 struct net_device *dev = dev_inst;
1563 struct ipg_nic_private *sp = netdev_priv(dev);
1564 void __iomem *ioaddr = sp->ioaddr;
1565 unsigned int handled = 0;
1566 u16 status;
1567
1568 IPG_DEBUG_MSG("_interrupt_handler\n");
1569
1570 if (sp->is_jumbo)
1571 ipg_nic_rxrestore(dev);
1572
1573 spin_lock(&sp->lock);
1574
1575 /* Get interrupt source information, and acknowledge
1576 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1577 * IntRequested, MacControlFrame, LinkEvent) interrupts
1578 * if issued. Also, all IPG interrupts are disabled by
1579 * reading IntStatusAck.
1580 */
1581 status = ipg_r16(INT_STATUS_ACK);
1582
1583 IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
1584
1585 /* Shared IRQ with no interrupt bits for us, or the device is being removed. */
1586 if (!(status & IPG_IS_RSVD_MASK))
1587 goto out_enable;
1588
1589 handled = 1;
1590
1591 if (unlikely(!netif_running(dev)))
1592 goto out_unlock;
1593
1594 /* If RFDListEnd interrupt, restore all used RFDs. */
1595 if (status & IPG_IS_RFD_LIST_END) {
1596 IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
1597
1598 /* The RFD list end indicates an RFD was encountered
1599 * with a 0 NextPtr, or with an RFDDone bit set to 1
1600 * (indicating the RFD is not ready for use by the
1601 * IPG.) Try to restore all RFDs.
1602 */
1603 ipg_nic_rxrestore(dev);
1604
1605#ifdef IPG_DEBUG
1606 /* Increment the RFDlistendCount counter. */
1607 sp->RFDlistendCount++;
1608#endif
1609 }
1610
1611 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1612 * IntRequested interrupt, process received frames. */
1613 if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1614 (status & IPG_IS_RFD_LIST_END) ||
1615 (status & IPG_IS_RX_DMA_COMPLETE) ||
1616 (status & IPG_IS_INT_REQUESTED)) {
1617#ifdef IPG_DEBUG
1618 /* Increment the RFD list checked counter if interrupted
1619 * only to check the RFD list. */
1620 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1621 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1622 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1623 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1624 IPG_IS_UPDATE_STATS)))
1625 sp->RFDListCheckedCount++;
1626#endif
1627
1628 if (sp->is_jumbo)
1629 ipg_nic_rx_jumbo(dev);
1630 else
1631 ipg_nic_rx(dev);
1632 }
1633
1634 /* If TxDMAComplete interrupt, free used TFDs. */
1635 if (status & IPG_IS_TX_DMA_COMPLETE)
1636 ipg_nic_txfree(dev);
1637
1638 /* TxComplete interrupts indicate one of numerous actions.
1639 * Determine what action to take based on TXSTATUS register.
1640 */
1641 if (status & IPG_IS_TX_COMPLETE)
1642 ipg_nic_txcleanup(dev);
1643
1644 /* If UpdateStats interrupt, update Linux Ethernet statistics */
1645 if (status & IPG_IS_UPDATE_STATS)
1646 ipg_nic_get_stats(dev);
1647
1648 /* If HostError interrupt, reset IPG. */
1649 if (status & IPG_IS_HOST_ERROR) {
1650 IPG_DDEBUG_MSG("HostError Interrupt\n");
1651
1652 schedule_delayed_work(&sp->task, 0);
1653 }
1654
1655 /* If LinkEvent interrupt, resolve autonegotiation. */
1656 if (status & IPG_IS_LINK_EVENT) {
1657 if (ipg_config_autoneg(dev) < 0)
1658 netdev_info(dev, "Auto-negotiation error\n");
1659 }
1660
1661 /* If MACCtrlFrame interrupt, do nothing. */
1662 if (status & IPG_IS_MAC_CTRL_FRAME)
1663 IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
1664
1665 /* If RxComplete interrupt, do nothing. */
1666 if (status & IPG_IS_RX_COMPLETE)
1667 IPG_DEBUG_MSG("RxComplete interrupt\n");
1668
1669 /* If RxEarly interrupt, do nothing. */
1670 if (status & IPG_IS_RX_EARLY)
1671 IPG_DEBUG_MSG("RxEarly interrupt\n");
1672
1673out_enable:
1674 /* Re-enable IPG interrupts. */
1675 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1676 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1677 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1678out_unlock:
1679 spin_unlock(&sp->lock);
1680
1681 return IRQ_RETVAL(handled);
1682}
1683
1684static void ipg_rx_clear(struct ipg_nic_private *sp)
1685{
1686 unsigned int i;
1687
1688 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1689 if (sp->rx_buff[i]) {
1690 struct ipg_rx *rxfd = sp->rxd + i;
1691
1692 dev_kfree_skb_irq(sp->rx_buff[i]);
1693 sp->rx_buff[i] = NULL;
1694 pci_unmap_single(sp->pdev,
1695 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1696 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1697 }
1698 }
1699}
1700
1701static void ipg_tx_clear(struct ipg_nic_private *sp)
1702{
1703 unsigned int i;
1704
1705 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1706 if (sp->tx_buff[i]) {
1707 struct ipg_tx *txfd = sp->txd + i;
1708
1709 pci_unmap_single(sp->pdev,
1710 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1711 sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1712
1713 dev_kfree_skb_irq(sp->tx_buff[i]);
1714
1715 sp->tx_buff[i] = NULL;
1716 }
1717 }
1718}
1719
1720static int ipg_nic_open(struct net_device *dev)
1721{
1722 struct ipg_nic_private *sp = netdev_priv(dev);
1723 void __iomem *ioaddr = sp->ioaddr;
1724 struct pci_dev *pdev = sp->pdev;
1725 int rc;
1726
1727 IPG_DEBUG_MSG("_nic_open\n");
1728
1729 sp->rx_buf_sz = sp->rxsupport_size;
1730
1731 /* Check for interrupt line conflicts, and request interrupt
1732 * line for IPG.
1733 *
1734 * IMPORTANT: Disable IPG interrupts prior to registering
1735 * IRQ.
1736 */
1737 ipg_w16(0x0000, INT_ENABLE);
1738
1739 /* Register the interrupt line to be used by the IPG within
1740 * the Linux system.
1741 */
1742 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1743 dev->name, dev);
1744 if (rc < 0) {
1745 netdev_info(dev, "Error when requesting interrupt\n");
1746 goto out;
1747 }
1748
1749 dev->irq = pdev->irq;
1750
1751 rc = -ENOMEM;
1752
1753 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1754 &sp->rxd_map, GFP_KERNEL);
1755 if (!sp->rxd)
1756 goto err_free_irq_0;
1757
1758 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1759 &sp->txd_map, GFP_KERNEL);
1760 if (!sp->txd)
1761 goto err_free_rx_1;
1762
1763 rc = init_rfdlist(dev);
1764 if (rc < 0) {
1765 netdev_info(dev, "Error during configuration\n");
1766 goto err_free_tx_2;
1767 }
1768
1769 init_tfdlist(dev);
1770
1771 rc = ipg_io_config(dev);
1772 if (rc < 0) {
1773 netdev_info(dev, "Error during configuration\n");
1774 goto err_release_tfdlist_3;
1775 }
1776
1777 /* Resolve autonegotiation. */
1778 if (ipg_config_autoneg(dev) < 0)
1779 netdev_info(dev, "Auto-negotiation error\n");
1780
1781 /* Initialize the jumbo frame control variables. */
1782 sp->jumbo.found_start = 0;
1783 sp->jumbo.current_size = 0;
1784 sp->jumbo.skb = NULL;
1785
1786 /* Enable transmit and receive operation of the IPG. */
1787 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1788 IPG_MC_RSVD_MASK, MAC_CTRL);
1789
1790 netif_start_queue(dev);
1791out:
1792 return rc;
1793
1794err_release_tfdlist_3:
1795 ipg_tx_clear(sp);
1796 ipg_rx_clear(sp);
1797err_free_tx_2:
1798 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1799err_free_rx_1:
1800 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1801err_free_irq_0:
1802 free_irq(pdev->irq, dev);
1803 goto out;
1804}
1805
1806static int ipg_nic_stop(struct net_device *dev)
1807{
1808 struct ipg_nic_private *sp = netdev_priv(dev);
1809 void __iomem *ioaddr = sp->ioaddr;
1810 struct pci_dev *pdev = sp->pdev;
1811
1812 IPG_DEBUG_MSG("_nic_stop\n");
1813
1814 netif_stop_queue(dev);
1815
1816 IPG_DUMPTFDLIST(dev);
1817
1818 do {
1819 (void) ipg_r16(INT_STATUS_ACK);
1820
1821 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1822
1823 synchronize_irq(pdev->irq);
1824 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1825
1826 ipg_rx_clear(sp);
1827
1828 ipg_tx_clear(sp);
1829
1830 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1831 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1832
1833 free_irq(pdev->irq, dev);
1834
1835 return 0;
1836}
1837
1838static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1839 struct net_device *dev)
1840{
1841 struct ipg_nic_private *sp = netdev_priv(dev);
1842 void __iomem *ioaddr = sp->ioaddr;
1843 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1844 unsigned long flags;
1845 struct ipg_tx *txfd;
1846
1847 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1848
1849 /* If in 10Mbps mode, stop the transmit queue so
1850 * no more transmit frames are accepted.
1851 */
1852 if (sp->tenmbpsmode)
1853 netif_stop_queue(dev);
1854
1855 if (sp->reset_current_tfd) {
1856 sp->reset_current_tfd = 0;
1857 entry = 0;
1858 }
1859
1860 txfd = sp->txd + entry;
1861
1862 sp->tx_buff[entry] = skb;
1863
1864 /* Clear all TFC fields, except TFDDONE. */
1865 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1866
1867 /* Specify the TFC field within the TFD. */
1868 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1869 (IPG_TFC_FRAMEID & sp->tx_current) |
1870 (IPG_TFC_FRAGCOUNT & (1 << 24)));
1871 /*
1872 * 16--17 (WordAlign) <- 3 (disable),
1873 * 0--15 (FrameId) <- sp->tx_current,
1874 * 24--27 (FragCount) <- 1
1875 */
1876
1877 /* Request TxComplete interrupts at an interval defined
1878 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1879 * Request TxComplete interrupt for every frame
1880 * if in 10Mbps mode to accommodate problem with 10Mbps
1881 * processing.
1882 */
1883 if (sp->tenmbpsmode)
1884 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1885 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1886 /* Based on compilation option, determine if FCS is to be
1887 * appended to transmit frame by IPG.
1888 */
1889 if (!(IPG_APPEND_FCS_ON_TX))
1890 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1891
1892 /* Based on compilation option, determine if IP, TCP and/or
1893 * UDP checksums are to be added to transmit frame by IPG.
1894 */
1895 if (IPG_ADD_IPCHECKSUM_ON_TX)
1896 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1897
1898 if (IPG_ADD_TCPCHECKSUM_ON_TX)
1899 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1900
1901 if (IPG_ADD_UDPCHECKSUM_ON_TX)
1902 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1903
1904 /* Based on compilation option, determine if VLAN tag info is to be
1905 * inserted into transmit frame by IPG.
1906 */
1907 if (IPG_INSERT_MANUAL_VLAN_TAG) {
1908 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1909 ((u64) IPG_MANUAL_VLAN_VID << 32) |
1910 ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1911 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1912 }
1913
1914 /* The fragment start location within system memory is defined
1915 * by the sk_buff structure's data field. The corresponding
1916 * DMA (bus) address of this location is obtained with
1917 * pci_map_single().
1918 */
1919 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1920 skb->len, PCI_DMA_TODEVICE));
1921
1922 /* The length of the fragment within system memory is defined by
1923 * the sk_buff structure's len field.
1924 */
1925 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1926 ((u64) (skb->len & 0xffff) << 48));
1927
1928 /* Clear the TFDDone bit last to indicate the TFD is ready
1929 * for transfer to the IPG.
1930 */
1931 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1932
1933 spin_lock_irqsave(&sp->lock, flags);
1934
1935 sp->tx_current++;
1936
1937 mmiowb();
1938
1939 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1940
1941 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1942 netif_stop_queue(dev);
1943
1944 spin_unlock_irqrestore(&sp->lock, flags);
1945
1946 return NETDEV_TX_OK;
1947}
1948
1949static void ipg_set_phy_default_param(unsigned char rev,
1950 struct net_device *dev, int phy_address)
1951{
1952 unsigned short length;
1953 unsigned char revision;
1954 const unsigned short *phy_param;
1955 unsigned short address, value;
1956
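/* DefaultPhyParam[] is a sequence of per-revision blocks: a header
 * word whose low byte is the block length in bytes and whose high
 * byte is the chip revision, followed by (register address, value)
 * pairs; each pair written decrements the remaining length by 4.
 */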
1957 phy_param = &DefaultPhyParam[0];
1958 length = *phy_param & 0x00FF;
1959 revision = (unsigned char)((*phy_param) >> 8);
1960 phy_param++;
1961 while (length != 0) {
1962 if (rev == revision) {
1963 while (length > 1) {
1964 address = *phy_param;
1965 value = *(phy_param + 1);
1966 phy_param += 2;
1967 mdio_write(dev, phy_address, address, value);
1968 length -= 4;
1969 }
1970 break;
1971 } else {
1972 phy_param += length / 2;
1973 length = *phy_param & 0x00FF;
1974 revision = (unsigned char)((*phy_param) >> 8);
1975 phy_param++;
1976 }
1977 }
1978}
1979
1980static int read_eeprom(struct net_device *dev, int eep_addr)
1981{
1982 void __iomem *ioaddr = ipg_ioaddr(dev);
1983 unsigned int i;
1984 int ret = 0;
1985 u16 value;
1986
1987 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1988 ipg_w16(value, EEPROM_CTRL);
1989
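/* Poll for completion, up to 1000 iterations 10 ms apart, waiting
 * for the EEPROM busy bit to clear before reading the data register.
 */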
1990 for (i = 0; i < 1000; i++) {
1991 u16 data;
1992
1993 mdelay(10);
1994 data = ipg_r16(EEPROM_CTRL);
1995 if (!(data & IPG_EC_EEPROM_BUSY)) {
1996 ret = ipg_r16(EEPROM_DATA);
1997 break;
1998 }
1999 }
2000 return ret;
2001}
2002
2003static void ipg_init_mii(struct net_device *dev)
2004{
2005 struct ipg_nic_private *sp = netdev_priv(dev);
2006 struct mii_if_info *mii_if = &sp->mii_if;
2007 int phyaddr;
2008
2009 mii_if->dev = dev;
2010 mii_if->mdio_read = mdio_read;
2011 mii_if->mdio_write = mdio_write;
2012 mii_if->phy_id_mask = 0x1f;
2013 mii_if->reg_num_mask = 0x1f;
2014
2015 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2016
2017 if (phyaddr != 0x1f) {
2018 u16 mii_phyctrl, mii_1000cr;
2019
2020 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2021 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2022 GMII_PHY_1000BASETCONTROL_PreferMaster;
2023 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2024
2025 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2026
2027 /* Set default phyparam */
2028 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2029
2030 /* Reset PHY */
2031 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2032 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2033
2034 }
2035}
2036
2037static int ipg_hw_init(struct net_device *dev)
2038{
2039 struct ipg_nic_private *sp = netdev_priv(dev);
2040 void __iomem *ioaddr = sp->ioaddr;
2041 unsigned int i;
2042 int rc;
2043
2044 /* Read/Write and Reset EEPROM Value */
2045 /* Read LED Mode Configuration from EEPROM */
2046 sp->led_mode = read_eeprom(dev, 6);
2047
2048 /* Reset all functions within the IPG. Do not assert
2049 * RST_OUT as it is not compatible with some PHYs.
2050 */
2051 rc = ipg_reset(dev, IPG_RESET_MASK);
2052 if (rc < 0)
2053 goto out;
2054
2055 ipg_init_mii(dev);
2056
2057 /* Read MAC Address from EEPROM */
2058 for (i = 0; i < 3; i++)
2059 sp->station_addr[i] = read_eeprom(dev, 16 + i);
2060
2061 for (i = 0; i < 3; i++)
2062 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2063
2064 /* Set station address in ethernet_device structure. */
2065 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2066 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2067 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2068 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2069 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2070 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2071out:
2072 return rc;
2073}
2074
2075static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2076{
2077 struct ipg_nic_private *sp = netdev_priv(dev);
2078 int rc;
2079
2080 mutex_lock(&sp->mii_mutex);
2081 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2082 mutex_unlock(&sp->mii_mutex);
2083
2084 return rc;
2085}
2086
2087static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2088{
2089 struct ipg_nic_private *sp = netdev_priv(dev);
2090 int err;
2091
2092 /* Function to accommodate changes to the Maximum Transmission Unit
2093 * (or MTU) of IPG NIC. Cannot use default function since
2094 * the default will not allow for MTU > 1500 bytes.
2095 */
2096
2097 IPG_DEBUG_MSG("_nic_change_mtu\n");
2098
2099 /*
2100 * Check that the new MTU value is between 68 (14 byte header, 46 byte
2101 * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU.
2102 */
2103 if (new_mtu < 68 || new_mtu > 10240)
2104 return -EINVAL;
2105
2106 err = ipg_nic_stop(dev);
2107 if (err)
2108 return err;
2109
2110 dev->mtu = new_mtu;
2111
2112 sp->max_rxframe_size = new_mtu;
2113
2114 sp->rxfrag_size = new_mtu;
2115 if (sp->rxfrag_size > 4088)
2116 sp->rxfrag_size = 4088;
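/* MTUs above 0x600 bytes use the jumbo receive path, where a frame
 * spans multiple RFDs of rxfrag_size bytes each (see
 * ipg_nic_rx_jumbo()).
 */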
2117
2118 sp->rxsupport_size = sp->max_rxframe_size;
2119
2120 if (new_mtu > 0x0600)
2121 sp->is_jumbo = true;
2122 else
2123 sp->is_jumbo = false;
2124
2125 return ipg_nic_open(dev);
2126}
2127
2128static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2129{
2130 struct ipg_nic_private *sp = netdev_priv(dev);
2131 int rc;
2132
2133 mutex_lock(&sp->mii_mutex);
2134 rc = mii_ethtool_gset(&sp->mii_if, cmd);
2135 mutex_unlock(&sp->mii_mutex);
2136
2137 return rc;
2138}
2139
2140static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2141{
2142 struct ipg_nic_private *sp = netdev_priv(dev);
2143 int rc;
2144
2145 mutex_lock(&sp->mii_mutex);
2146 rc = mii_ethtool_sset(&sp->mii_if, cmd);
2147 mutex_unlock(&sp->mii_mutex);
2148
2149 return rc;
2150}
2151
2152static int ipg_nway_reset(struct net_device *dev)
2153{
2154 struct ipg_nic_private *sp = netdev_priv(dev);
2155 int rc;
2156
2157 mutex_lock(&sp->mii_mutex);
2158 rc = mii_nway_restart(&sp->mii_if);
2159 mutex_unlock(&sp->mii_mutex);
2160
2161 return rc;
2162}
2163
2164static const struct ethtool_ops ipg_ethtool_ops = {
2165 .get_settings = ipg_get_settings,
2166 .set_settings = ipg_set_settings,
2167 .nway_reset = ipg_nway_reset,
2168};
2169
2170static void ipg_remove(struct pci_dev *pdev)
2171{
2172 struct net_device *dev = pci_get_drvdata(pdev);
2173 struct ipg_nic_private *sp = netdev_priv(dev);
2174
2175 IPG_DEBUG_MSG("_remove\n");
2176
2177 /* Un-register Ethernet device. */
2178 unregister_netdev(dev);
2179
2180 pci_iounmap(pdev, sp->ioaddr);
2181
2182 pci_release_regions(pdev);
2183
2184 free_netdev(dev);
2185 pci_disable_device(pdev);
2186}
2187
2188static const struct net_device_ops ipg_netdev_ops = {
2189 .ndo_open = ipg_nic_open,
2190 .ndo_stop = ipg_nic_stop,
2191 .ndo_start_xmit = ipg_nic_hard_start_xmit,
2192 .ndo_get_stats = ipg_nic_get_stats,
2193 .ndo_set_rx_mode = ipg_nic_set_multicast_list,
2194 .ndo_do_ioctl = ipg_ioctl,
2195 .ndo_tx_timeout = ipg_tx_timeout,
2196 .ndo_change_mtu = ipg_nic_change_mtu,
2197 .ndo_set_mac_address = eth_mac_addr,
2198 .ndo_validate_addr = eth_validate_addr,
2199};
2200
2201static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2202{
2203 unsigned int i = id->driver_data;
2204 struct ipg_nic_private *sp;
2205 struct net_device *dev;
2206 void __iomem *ioaddr;
2207 int rc;
2208
2209 rc = pci_enable_device(pdev);
2210 if (rc < 0)
2211 goto out;
2212
2213 pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2214
2215 pci_set_master(pdev);
2216
2217 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2218 if (rc < 0) {
2219 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2220 if (rc < 0) {
2221 pr_err("%s: DMA config failed\n", pci_name(pdev));
2222 goto err_disable_0;
2223 }
2224 }
2225
2226 /*
2227 * Initialize net device.
2228 */
2229 dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2230 if (!dev) {
2231 rc = -ENOMEM;
2232 goto err_disable_0;
2233 }
2234
2235 sp = netdev_priv(dev);
2236 spin_lock_init(&sp->lock);
2237 mutex_init(&sp->mii_mutex);
2238
2239 sp->is_jumbo = IPG_IS_JUMBO;
2240 sp->rxfrag_size = IPG_RXFRAG_SIZE;
2241 sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2242 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2243
2244 /* Declare IPG NIC functions for Ethernet device methods.
2245 */
2246 dev->netdev_ops = &ipg_netdev_ops;
2247 SET_NETDEV_DEV(dev, &pdev->dev);
2248 dev->ethtool_ops = &ipg_ethtool_ops;
2249
2250 rc = pci_request_regions(pdev, DRV_NAME);
2251 if (rc)
2252 goto err_free_dev_1;
2253
2254 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2255 if (!ioaddr) {
2256 pr_err("%s: cannot map MMIO\n", pci_name(pdev));
2257 rc = -EIO;
2258 goto err_release_regions_2;
2259 }
2260
2261	/* Save pointers to the register space and device structures. */
2262 sp->ioaddr = ioaddr;
2263 sp->pdev = pdev;
2264 sp->dev = dev;
2265
2266 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2267
2268 pci_set_drvdata(pdev, dev);
2269
2270 rc = ipg_hw_init(dev);
2271 if (rc < 0)
2272 goto err_unmap_3;
2273
2274 rc = register_netdev(dev);
2275 if (rc < 0)
2276 goto err_unmap_3;
2277
2278 netdev_info(dev, "Ethernet device registered\n");
2279out:
2280 return rc;
2281
2282err_unmap_3:
2283 pci_iounmap(pdev, ioaddr);
2284err_release_regions_2:
2285 pci_release_regions(pdev);
2286err_free_dev_1:
2287 free_netdev(dev);
2288err_disable_0:
2289 pci_disable_device(pdev);
2290 goto out;
2291}
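
ipg_probe() above is a textbook instance of the kernel's goto-based unwind ladder: every acquisition gets a numbered error label, and a failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A self-contained sketch of the idiom with hypothetical acquire/release helpers (not the driver's literal resources):

#include <stdio.h>

static int  acquire_a(void) { return 0; }           /* e.g. pci_enable_device() */
static void release_a(void) { puts("release_a"); }  /* e.g. pci_disable_device() */
static int  acquire_b(void) { return -1; }          /* simulate a failure */

static int example_probe(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto out;
	rc = acquire_b();
	if (rc)
		goto err_release_a;	/* unwind only what succeeded */
	return 0;

err_release_a:
	release_a();
out:
	return rc;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}
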
2292
2293static struct pci_driver ipg_pci_driver = {
2294 .name = IPG_DRIVER_NAME,
2295 .id_table = ipg_pci_tbl,
2296 .probe = ipg_probe,
2297 .remove = ipg_remove,
2298};
2299
2300module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
deleted file mode 100644
index de606281f97b..000000000000
--- a/drivers/net/ethernet/icplus/ipg.h
+++ /dev/null
@@ -1,748 +0,0 @@
1/*
2 * Include file for Gigabit Ethernet device driver for Network
3 * Interface Cards (NICs) utilizing the Tamarack Microelectronics
4 * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
5 * Controller.
6 */
7#ifndef __LINUX_IPG_H
8#define __LINUX_IPG_H
9
10#include <linux/module.h>
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/ioport.h>
15#include <linux/errno.h>
16#include <asm/io.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/skbuff.h>
22#include <asm/bitops.h>
23
24/*
25 * Constants
26 */
27
28/* GMII based PHY IDs */
29#define NS 0x2000
30#define MARVELL 0x0141
31#define ICPLUS_PHY 0x243
32
33/* NIC Physical Layer Device MII register fields. */
34#define MII_PHY_SELECTOR_IEEE8023 0x0001
35#define MII_PHY_TECHABILITYFIELD 0x1FE0
36
37/* GMII_PHY_1000 needs to be set to prefer master */
38#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
39
40/* NIC Physical Layer Device GMII constants. */
41#define GMII_PREAMBLE 0xFFFFFFFF
42#define GMII_ST 0x1
43#define GMII_READ 0x2
44#define GMII_WRITE 0x1
45#define GMII_TA_READ_MASK 0x1
46#define GMII_TA_WRITE 0x2
47
48/* I/O register offsets. */
49enum ipg_regs {
50 DMA_CTRL = 0x00,
51 RX_DMA_STATUS = 0x08, /* Unused + reserved */
52 TFD_LIST_PTR_0 = 0x10,
53 TFD_LIST_PTR_1 = 0x14,
54 TX_DMA_BURST_THRESH = 0x18,
55 TX_DMA_URGENT_THRESH = 0x19,
56 TX_DMA_POLL_PERIOD = 0x1a,
57 RFD_LIST_PTR_0 = 0x1c,
58 RFD_LIST_PTR_1 = 0x20,
59 RX_DMA_BURST_THRESH = 0x24,
60 RX_DMA_URGENT_THRESH = 0x25,
61 RX_DMA_POLL_PERIOD = 0x26,
62 DEBUG_CTRL = 0x2c,
63 ASIC_CTRL = 0x30,
64 FIFO_CTRL = 0x38, /* Unused */
65 FLOW_OFF_THRESH = 0x3c,
66 FLOW_ON_THRESH = 0x3e,
67 EEPROM_DATA = 0x48,
68 EEPROM_CTRL = 0x4a,
69 EXPROM_ADDR = 0x4c, /* Unused */
70 EXPROM_DATA = 0x50, /* Unused */
71 WAKE_EVENT = 0x51, /* Unused */
72 COUNTDOWN = 0x54, /* Unused */
73 INT_STATUS_ACK = 0x5a,
74 INT_ENABLE = 0x5c,
75 INT_STATUS = 0x5e, /* Unused */
76 TX_STATUS = 0x60,
77 MAC_CTRL = 0x6c,
78 VLAN_TAG = 0x70, /* Unused */
79 PHY_SET = 0x75,
80 PHY_CTRL = 0x76,
81 STATION_ADDRESS_0 = 0x78,
82 STATION_ADDRESS_1 = 0x7a,
83 STATION_ADDRESS_2 = 0x7c,
84 MAX_FRAME_SIZE = 0x86,
85 RECEIVE_MODE = 0x88,
86 HASHTABLE_0 = 0x8c,
87 HASHTABLE_1 = 0x90,
88 RMON_STATISTICS_MASK = 0x98,
89 STATISTICS_MASK = 0x9c,
90 RX_JUMBO_FRAMES = 0xbc, /* Unused */
91 TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
92 IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
93 UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
94 TX_JUMBO_FRAMES = 0xf4 /* Unused */
95};
96
97/* Ethernet MIB statistic register offsets. */
98#define IPG_OCTETRCVOK 0xA8
99#define IPG_MCSTOCTETRCVDOK 0xAC
100#define IPG_BCSTOCTETRCVOK 0xB0
101#define IPG_FRAMESRCVDOK 0xB4
102#define IPG_MCSTFRAMESRCVDOK 0xB8
103#define IPG_BCSTFRAMESRCVDOK 0xBE
104#define IPG_MACCONTROLFRAMESRCVD 0xC6
105#define IPG_FRAMETOOLONGERRORS 0xC8
106#define IPG_INRANGELENGTHERRORS 0xCA
107#define IPG_FRAMECHECKSEQERRORS 0xCC
108#define IPG_FRAMESLOSTRXERRORS 0xCE
109#define IPG_OCTETXMTOK 0xD0
110#define IPG_MCSTOCTETXMTOK 0xD4
111#define IPG_BCSTOCTETXMTOK 0xD8
112#define IPG_FRAMESXMTDOK 0xDC
113#define IPG_MCSTFRAMESXMTDOK 0xE0
114#define IPG_FRAMESWDEFERREDXMT 0xE4
115#define IPG_LATECOLLISIONS 0xE8
116#define IPG_MULTICOLFRAMES 0xEC
117#define IPG_SINGLECOLFRAMES 0xF0
118#define IPG_BCSTFRAMESXMTDOK 0xF6
119#define IPG_CARRIERSENSEERRORS 0xF8
120#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
121#define IPG_FRAMESABORTXSCOLLS 0xFC
122#define IPG_FRAMESWEXDEFERRAL 0xFE
123
124/* RMON statistic register offsets. */
125#define IPG_ETHERSTATSCOLLISIONS 0x100
126#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
127#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
128#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
129#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
130#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
131#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
132#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
133#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
134#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
135#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
136#define IPG_ETHERSTATSFRAGMENTS 0x12C
137#define IPG_ETHERSTATSJABBERS 0x130
138#define IPG_ETHERSTATSOCTETS 0x134
139#define IPG_ETHERSTATSPKTS 0x138
140#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
141#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
142#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
143#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
144#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
145#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
146
147/* RMON statistic register equivalents. */
148#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
149#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
150#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
151#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
152#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
153#define IPG_ETHERSTATSDROPEVENTS 0xCE
154
155/* Serial EEPROM offsets */
156#define IPG_EEPROM_CONFIGPARAM 0x00
157#define IPG_EEPROM_ASICCTRL 0x01
158#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
159#define IPG_EEPROM_SUBSYSTEMID 0x03
160#define IPG_EEPROM_STATIONADDRESS0 0x10
161#define IPG_EEPROM_STATIONADDRESS1 0x11
162#define IPG_EEPROM_STATIONADDRESS2 0x12
163
164/* Register & data structure bit masks */
165
166/* PCI register masks. */
167
168/* IOBaseAddress */
169#define IPG_PIB_RSVD_MASK 0xFFFFFE01
170#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
171#define IPG_PIB_IOBASEADDRIND 0x00000001
172
173/* MemBaseAddress */
174#define IPG_PMB_RSVD_MASK 0xFFFFFE07
175#define IPG_PMB_MEMBASEADDRIND 0x00000001
176#define IPG_PMB_MEMMAPTYPE 0x00000006
177#define IPG_PMB_MEMMAPTYPE0 0x00000002
178#define IPG_PMB_MEMMAPTYPE1 0x00000004
179#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
180
181/* ConfigStatus */
182#define IPG_CS_RSVD_MASK 0xFFB0
183#define IPG_CS_CAPABILITIES 0x0010
184#define IPG_CS_66MHZCAPABLE 0x0020
185#define IPG_CS_FASTBACK2BACK 0x0080
186#define IPG_CS_DATAPARITYREPORTED 0x0100
187#define IPG_CS_DEVSELTIMING 0x0600
188#define IPG_CS_SIGNALEDTARGETABORT 0x0800
189#define IPG_CS_RECEIVEDTARGETABORT 0x1000
190#define IPG_CS_RECEIVEDMASTERABORT 0x2000
191#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
192#define IPG_CS_DETECTEDPARITYERROR 0x8000
193
194/* TFD data structure masks. */
195
196/* TFDList, TFC */
197#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
198#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
199#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
200#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
201#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
202#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
203#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
204#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
205#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
206#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
207#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
208#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
209#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
210#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
211#define IPG_TFC_TFDDONE 0x0000000080000000ULL
212#define IPG_TFC_VID 0x00000FFF00000000ULL
213#define IPG_TFC_CFI 0x0000100000000000ULL
214#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
215
216/* TFDList, FragInfo */
217#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
218#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
219#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
220
221/* RFD data structure masks. */
222
223/* RFDList, RFS */
224#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
225#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
226#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
227#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
228#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
229#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
230#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
231#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
232#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
233#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
234#define IPG_RFS_TCPERROR 0x0000000001000000ULL
235#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
236#define IPG_RFS_UDPERROR 0x0000000004000000ULL
237#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
238#define IPG_RFS_IPERROR 0x0000000010000000ULL
239#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
240#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
241#define IPG_RFS_RFDDONE 0x0000000080000000ULL
242#define IPG_RFS_TCI 0x0000FFFF00000000ULL
243
244/* RFDList, FragInfo */
245#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
246#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
247#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
248
249/* I/O Register masks. */
250
251/* RMON Statistics Mask */
252#define IPG_RZ_ALL 0x0FFFFFFF
253
254/* Statistics Mask */
255#define IPG_SM_ALL 0x0FFFFFFF
256#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
257#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
258#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
259#define IPG_SM_RXJUMBOFRAMES 0x00000008
260#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
261#define IPG_SM_IPCHECKSUMERRORS 0x00000020
262#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
263#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
264#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
265#define IPG_SM_INRANGELENGTHERRORS 0x00000200
266#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
267#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
268#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
269#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
270#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
271#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
272#define IPG_SM_LATECOLLISIONS 0x00010000
273#define IPG_SM_MULTICOLFRAMES 0x00020000
274#define IPG_SM_SINGLECOLFRAMES 0x00040000
275#define IPG_SM_TXJUMBOFRAMES 0x00080000
276#define IPG_SM_CARRIERSENSEERRORS 0x00100000
277#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
278#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
279#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
280
281/* Countdown */
282#define IPG_CD_RSVD_MASK 0x0700FFFF
283#define IPG_CD_COUNT 0x0000FFFF
284#define IPG_CD_COUNTDOWNSPEED 0x01000000
285#define IPG_CD_COUNTDOWNMODE 0x02000000
286#define IPG_CD_COUNTINTENABLED 0x04000000
287
288/* TxDMABurstThresh */
289#define IPG_TB_RSVD_MASK 0xFF
290
291/* TxDMAUrgentThresh */
292#define IPG_TU_RSVD_MASK 0xFF
293
294/* TxDMAPollPeriod */
295#define IPG_TP_RSVD_MASK 0xFF
296
297/* RxDMAUrgentThresh */
298#define IPG_RU_RSVD_MASK 0xFF
299
300/* RxDMAPollPeriod */
301#define IPG_RP_RSVD_MASK 0xFF
302
303/* ReceiveMode */
304#define IPG_RM_RSVD_MASK 0x3F
305#define IPG_RM_RECEIVEUNICAST 0x01
306#define IPG_RM_RECEIVEMULTICAST 0x02
307#define IPG_RM_RECEIVEBROADCAST 0x04
308#define IPG_RM_RECEIVEALLFRAMES 0x08
309#define IPG_RM_RECEIVEMULTICASTHASH 0x10
310#define IPG_RM_RECEIVEIPMULTICAST 0x20
311
312/* PhySet */
313#define IPG_PS_MEM_LENB9B 0x01
314#define IPG_PS_MEM_LEN9 0x02
315#define IPG_PS_NON_COMPDET 0x04
316
317/* PhyCtrl */
318#define IPG_PC_RSVD_MASK 0xFF
319#define IPG_PC_MGMTCLK_LO 0x00
320#define IPG_PC_MGMTCLK_HI 0x01
321#define IPG_PC_MGMTCLK 0x01
322#define IPG_PC_MGMTDATA 0x02
323#define IPG_PC_MGMTDIR 0x04
324#define IPG_PC_DUPLEX_POLARITY 0x08
325#define IPG_PC_DUPLEX_STATUS 0x10
326#define IPG_PC_LINK_POLARITY 0x20
327#define IPG_PC_LINK_SPEED 0xC0
328#define IPG_PC_LINK_SPEED_10MBPS 0x40
329#define IPG_PC_LINK_SPEED_100MBPS 0x80
330#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
331
332/* DMACtrl */
333#define IPG_DC_RSVD_MASK 0xC07D9818
334#define IPG_DC_RX_DMA_COMPLETE 0x00000008
335#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
336#define IPG_DC_TX_DMA_COMPLETE 0x00000800
337#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
338#define IPG_DC_TX_DMA_IN_PROG 0x00008000
339#define IPG_DC_RX_EARLY_DISABLE 0x00010000
340#define IPG_DC_MWI_DISABLE 0x00040000
341#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
342#define IPG_DC_TX_BURST_LIMIT 0x00700000
343#define IPG_DC_TARGET_ABORT 0x40000000
344#define IPG_DC_MASTER_ABORT 0x80000000
345
346/* ASICCtrl */
347#define IPG_AC_RSVD_MASK 0x07FFEFF2
348#define IPG_AC_EXP_ROM_SIZE 0x00000002
349#define IPG_AC_PHY_SPEED10 0x00000010
350#define IPG_AC_PHY_SPEED100 0x00000020
351#define IPG_AC_PHY_SPEED1000 0x00000040
352#define IPG_AC_PHY_MEDIA 0x00000080
353#define IPG_AC_FORCED_CFG 0x00000700
354#define IPG_AC_D3RESETDISABLE 0x00000800
355#define IPG_AC_SPEED_UP_MODE 0x00002000
356#define IPG_AC_LED_MODE 0x00004000
357#define IPG_AC_RST_OUT_POLARITY 0x00008000
358#define IPG_AC_GLOBAL_RESET 0x00010000
359#define IPG_AC_RX_RESET 0x00020000
360#define IPG_AC_TX_RESET 0x00040000
361#define IPG_AC_DMA 0x00080000
362#define IPG_AC_FIFO 0x00100000
363#define IPG_AC_NETWORK 0x00200000
364#define IPG_AC_HOST 0x00400000
365#define IPG_AC_AUTO_INIT 0x00800000
366#define IPG_AC_RST_OUT 0x01000000
367#define IPG_AC_INT_REQUEST 0x02000000
368#define IPG_AC_RESET_BUSY 0x04000000
369#define IPG_AC_LED_SPEED 0x08000000
370#define IPG_AC_LED_MODE_BIT_1 0x20000000
371
372/* EepromCtrl */
373#define IPG_EC_RSVD_MASK 0x83FF
374#define IPG_EC_EEPROM_ADDR 0x00FF
375#define IPG_EC_EEPROM_OPCODE 0x0300
376#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
377#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
378#define IPG_EC_EEPROM_READOPCODE 0x0200
379#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
380#define IPG_EC_EEPROM_BUSY 0x8000
381
382/* FIFOCtrl */
383#define IPG_FC_RSVD_MASK 0xC001
384#define IPG_FC_RAM_TEST_MODE 0x0001
385#define IPG_FC_TRANSMITTING 0x4000
386#define IPG_FC_RECEIVING 0x8000
387
388/* TxStatus */
389#define IPG_TS_RSVD_MASK 0xFFFF00DD
390#define IPG_TS_TX_ERROR 0x00000001
391#define IPG_TS_LATE_COLLISION 0x00000004
392#define IPG_TS_TX_MAX_COLL 0x00000008
393#define IPG_TS_TX_UNDERRUN 0x00000010
394#define IPG_TS_TX_IND_REQD 0x00000040
395#define IPG_TS_TX_COMPLETE 0x00000080
396#define IPG_TS_TX_FRAMEID 0xFFFF0000
397
398/* WakeEvent */
399#define IPG_WE_WAKE_PKT_ENABLE 0x01
400#define IPG_WE_MAGIC_PKT_ENABLE 0x02
401#define IPG_WE_LINK_EVT_ENABLE 0x04
402#define IPG_WE_WAKE_POLARITY 0x08
403#define IPG_WE_WAKE_PKT_EVT 0x10
404#define IPG_WE_MAGIC_PKT_EVT 0x20
405#define IPG_WE_LINK_EVT 0x40
406#define IPG_WE_WOL_ENABLE 0x80
407
408/* IntEnable */
409#define IPG_IE_RSVD_MASK 0x1FFE
410#define IPG_IE_HOST_ERROR 0x0002
411#define IPG_IE_TX_COMPLETE 0x0004
412#define IPG_IE_MAC_CTRL_FRAME 0x0008
413#define IPG_IE_RX_COMPLETE 0x0010
414#define IPG_IE_RX_EARLY 0x0020
415#define IPG_IE_INT_REQUESTED 0x0040
416#define IPG_IE_UPDATE_STATS 0x0080
417#define IPG_IE_LINK_EVENT 0x0100
418#define IPG_IE_TX_DMA_COMPLETE 0x0200
419#define IPG_IE_RX_DMA_COMPLETE 0x0400
420#define IPG_IE_RFD_LIST_END 0x0800
421#define IPG_IE_RX_DMA_PRIORITY 0x1000
422
423/* IntStatus */
424#define IPG_IS_RSVD_MASK 0x1FFF
425#define IPG_IS_INTERRUPT_STATUS 0x0001
426#define IPG_IS_HOST_ERROR 0x0002
427#define IPG_IS_TX_COMPLETE 0x0004
428#define IPG_IS_MAC_CTRL_FRAME 0x0008
429#define IPG_IS_RX_COMPLETE 0x0010
430#define IPG_IS_RX_EARLY 0x0020
431#define IPG_IS_INT_REQUESTED 0x0040
432#define IPG_IS_UPDATE_STATS 0x0080
433#define IPG_IS_LINK_EVENT 0x0100
434#define IPG_IS_TX_DMA_COMPLETE 0x0200
435#define IPG_IS_RX_DMA_COMPLETE 0x0400
436#define IPG_IS_RFD_LIST_END 0x0800
437#define IPG_IS_RX_DMA_PRIORITY 0x1000
438
439/* MACCtrl */
440#define IPG_MC_RSVD_MASK 0x7FE33FA3
441#define IPG_MC_IFS_SELECT 0x00000003
442#define IPG_MC_IFS_4352BIT 0x00000003
443#define IPG_MC_IFS_1792BIT 0x00000002
444#define IPG_MC_IFS_1024BIT 0x00000001
445#define IPG_MC_IFS_96BIT 0x00000000
446#define IPG_MC_DUPLEX_SELECT 0x00000020
447#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
448#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
449#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
450#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
451#define IPG_MC_RCV_FCS 0x00000200
452#define IPG_MC_FIFO_LOOPBACK 0x00000400
453#define IPG_MC_MAC_LOOPBACK 0x00000800
454#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
455#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
456#define IPG_MC_COLLISION_DETECT 0x00010000
457#define IPG_MC_CARRIER_SENSE 0x00020000
458#define IPG_MC_STATISTICS_ENABLE 0x00200000
459#define IPG_MC_STATISTICS_DISABLE 0x00400000
460#define IPG_MC_STATISTICS_ENABLED 0x00800000
461#define IPG_MC_TX_ENABLE 0x01000000
462#define IPG_MC_TX_DISABLE 0x02000000
463#define IPG_MC_TX_ENABLED 0x04000000
464#define IPG_MC_RX_ENABLE 0x08000000
465#define IPG_MC_RX_DISABLE 0x10000000
466#define IPG_MC_RX_ENABLED 0x20000000
467#define IPG_MC_PAUSED 0x40000000
468
469/*
470 * Tune
471 */
472
473/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
474#define IPG_APPEND_FCS_ON_TX 1
475
476/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
477#define IPG_STRIP_FCS_ON_RX 1
478
479/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
480 * Ethernet errors.
481 */
482#define IPG_DROP_ON_RX_ETH_ERRORS 1
483
484/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
485 * (via TFC).
486 */
487#define IPG_INSERT_MANUAL_VLAN_TAG 0
488
489/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
490#define IPG_ADD_IPCHECKSUM_ON_TX 0
491
492/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
493 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
494 */
495#define IPG_ADD_TCPCHECKSUM_ON_TX 0
496
497/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
498 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
499 */
500#define IPG_ADD_UDPCHECKSUM_ON_TX 0
501
502/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
503 * constants as desired.
504 */
505#define IPG_MANUAL_VLAN_VID 0xABC
506#define IPG_MANUAL_VLAN_CFI 0x1
507#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
508
509#define IPG_IO_REG_RANGE 0xFF
510#define IPG_MEM_REG_RANGE 0x154
511#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
512#define IPG_NIC_PHY_ADDRESS 0x01
513#define IPG_DMALIST_ALIGN_PAD 0x07
514#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
515
516/* Number of milliseconds to wait after issuing a software reset.
517 * Keep IPG_AC_RESETWAIT >= 0x05 to allow proper 10Mbps operation.
518 */
519#define IPG_AC_RESETWAIT 0x05
520
521/* Number of IPG_AC_RESETWAIT time periods before declaring timeout. */
522#define IPG_AC_RESET_TIMEOUT 0x0A
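
Together the two constants budget 0x05 ms x 0x0A periods = 50 ms for a software reset. A hypothetical polling loop showing how such constants are typically combined (a sketch, not the driver's literal reset path):

static int example_wait_reset(void __iomem *ioaddr)
{
	unsigned int tries = IPG_AC_RESET_TIMEOUT;

	while (tries--) {
		mdelay(IPG_AC_RESETWAIT);	/* 5 ms per poll */
		if (!(ioread32(ioaddr + ASIC_CTRL) & IPG_AC_RESET_BUSY))
			return 0;		/* reset completed */
	}
	return -ETIMEDOUT;			/* ~50 ms elapsed, give up */
}
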
523
524/* Minimum number of nanoseconds used to toggle MDC clock during
525 * MII/GMII register access.
526 */
527#define IPG_PC_PHYCTRLWAIT_NS 200
528
529#define IPG_TFDLIST_LENGTH 0x100
530
531/* Number of frames between TxDMAComplete interrupts.
532 * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
533 */
534#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
535
536#define IPG_RFDLIST_LENGTH 0x100
537
538/* Maximum number of RFDs to process per interrupt.
539 * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
540 */
541#define IPG_MAXRFDPROCESS_COUNT 0x80
542
543/* Minimum margin between the last freed RFD and the current RFD.
544 * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
545 */
546#define IPG_MINUSEDRFDSTOFREE 0x80
547
548/* Specify the maximum jumbo frame size, in units of 0x600 bytes
549 * (the rx buffer size that one RFD can carry): 0x8 * 0x600 = 12 KB.
550 */
551#define MAX_JUMBOSIZE 0x8 /* max is 12K */
552
553/* Key register values loaded at driver start up. */
554
555/* TXDMAPollPeriod is specified in 320ns increments.
556 *
557 * Value Time
558 * ---------------------
559 * 0x00-0x01 320ns
560 * 0x03 ~1us
561 * 0x1F ~10us
562 * 0xFF ~82us
563 */
564#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
565
566/* TxDMAUrgentThresh specifies the minimum amount of
567 * data in the transmit FIFO before asserting an
568 * urgent transmit DMA request.
569 *
570 * Value Min TxFIFO occupied space before urgent TX request
571 * ---------------------------------------------------------------
572 * 0x00-0x04 128 bytes (1024 bits)
573 * 0x27 1248 bytes (~10000 bits)
574 * 0x30 1536 bytes (12288 bits)
575 * 0xFF        8192 bytes (65536 bits)
576 */
577#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
578
579/* TxDMABurstThresh specifies the minimum amount of
580 * free space in the transmit FIFO before asserting a
581 * transmit DMA request.
582 *
583 * Value Min TxFIFO free space before TX request
584 * ----------------------------------------------------
585 * 0x00-0x08 256 bytes
586 * 0x30 1536 bytes
587 * 0xFF 8192 bytes
588 */
589#define IPG_TXDMABURSTTHRESH_VALUE 0x30
590
591/* RXDMAPollPeriod is specified in 320ns increments.
592 *
593 * Value Time
594 * ---------------------
595 * 0x00-0x01 320ns
596 * 0x03 ~1us
597 * 0x1F ~10us
598 * 0xFF ~82us
599 */
600#define IPG_RXDMAPOLLPERIOD_VALUE 0x01
601
602/* RxDMAUrgentThresh specifies the minimum amount of
603 * free space within the receive FIFO before asserting
604 * an urgent receive DMA request.
605 *
606 * Value Min RxFIFO free space before urgent RX request
607 * ---------------------------------------------------------------
608 * 0x00-0x04 128 bytes (1024 bits)
609 * 0x27 1248 bytes (~10000 bits)
610 * 0x30 1536 bytes (12288 bits)
611 * 0xFF        8192 bytes (65536 bits)
612 */
613#define IPG_RXDMAURGENTTHRESH_VALUE 0x30
614
615/* RxDMABurstThresh specifies the minimum amount of
616 * occupied space within the receive FIFO before asserting
617 * a receive DMA request.
618 *
619 * Value   Min RxFIFO occupied space before RX request
620 * ----------------------------------------------------
621 * 0x00-0x08 256 bytes
622 * 0x30 1536 bytes
623 * 0xFF 8192 bytes
624 */
625#define IPG_RXDMABURSTTHRESH_VALUE 0x30
626
627/* FlowOnThresh specifies the maximum amount of occupied
628 * space in the receive FIFO before a PAUSE frame with
629 * maximum pause time is transmitted.
630 *
631 * Value Max RxFIFO occupied space before PAUSE
632 * ---------------------------------------------------
633 * 0x0000 0 bytes
634 * 0x0740 29,696 bytes
635 * 0x07FF 32,752 bytes
636 */
637#define IPG_FLOWONTHRESH_VALUE 0x0740
638
639/* FlowOffThresh specifies the minimum amount of occupied
640 * space in the receive FIFO before a PAUSE frame with
641 * zero pause time is transmitted.
642 *
643 * Value   Min RxFIFO occupied space before PAUSE
644 * ---------------------------------------------------
645 * 0x0000 0 bytes
646 * 0x00BF 3056 bytes
647 * 0x07FF 32,752 bytes
648 */
649#define IPG_FLOWOFFTHRESH_VALUE 0x00BF
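
Both flow-control thresholds count 16-byte units, which is how the tables above resolve: 0x0740 * 16 = 29,696 bytes, 0x00BF * 16 = 3,056 bytes, and the 0x07FF register maximum * 16 = 32,752 bytes.
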
650
651/*
652 * Miscellaneous macros.
653 */
654
655/* Macros for printing debug statements. */
656#ifdef IPG_DEBUG
657# define IPG_DEBUG_MSG(fmt, args...) \
658do { \
659 if (0) \
660 printk(KERN_DEBUG "IPG: " fmt, ##args); \
661} while (0)
662# define IPG_DDEBUG_MSG(fmt, args...) \
663 printk(KERN_DEBUG "IPG: " fmt, ##args)
664# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
665# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
666#else
667# define IPG_DEBUG_MSG(fmt, args...) \
668do { \
669 if (0) \
670 printk(KERN_DEBUG "IPG: " fmt, ##args); \
671} while (0)
672# define IPG_DDEBUG_MSG(fmt, args...) \
673do { \
674 if (0) \
675 printk(KERN_DEBUG "IPG: " fmt, ##args); \
676} while (0)
677# define IPG_DUMPRFDLIST(args)
678# define IPG_DUMPTFDLIST(args)
679#endif
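
Both variants wrap the printk in `do { if (0) ... } while (0)`, so even disabled debug calls are still parsed and type-checked by the compiler and then discarded by the optimizer, while the do-while makes the macro behave like a single statement. A self-contained userspace equivalent of the idiom (printf standing in for printk):

#include <stdio.h>

#define DBG_MSG(fmt, args...) \
do { \
	if (0) \
		printf("IPG: " fmt, ##args); \
} while (0)

int main(void)
{
	DBG_MSG("value = %d\n", 42);	/* type-checked, never executed */
	return 0;
}
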
680
681/*
682 * End miscellaneous macros.
683 */
684
685/* Transmit Frame Descriptor. The IPG supports 15 fragments;
686 * this driver, however, uses only a single fragment. Note, each
687 * TFD field is 64 bits wide.
688 */
689struct ipg_tx {
690 __le64 next_desc;
691 __le64 tfc;
692 __le64 frag_info;
693};
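
The masks defined earlier pin down the frag_info layout: DMA address in bits 0-39 (IPG_TFI_FRAGADDR), fragment length in bits 48-63 (IPG_TFI_FRAGLEN). A self-contained sketch of packing such a word; the helper name and sample values are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL	/* bits 0..39 */
#define IPG_TFI_FRAGLEN  0xFFFF000000000000ULL	/* bits 48..63 */

static uint64_t pack_frag_info(uint64_t dma_addr, uint16_t len)
{
	return (dma_addr & IPG_TFI_FRAGADDR) |
	       (((uint64_t)len << 48) & IPG_TFI_FRAGLEN);
}

int main(void)
{
	/* 1514-byte frame at a sample bus address */
	printf("frag_info = 0x%016" PRIx64 "\n",
	       pack_frag_info(0x12345678ULL, 1514));
	return 0;
}

In the driver proper, the packed value would additionally pass through cpu_to_le64() before being written into the __le64 descriptor field.
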
694
695/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
696 */
697struct ipg_rx {
698 __le64 next_desc;
699 __le64 rfs;
700 __le64 frag_info;
701};
702
703struct ipg_jumbo {
704 int found_start;
705 int current_size;
706 struct sk_buff *skb;
707};
708
709/* Structure of IPG NIC specific data. */
710struct ipg_nic_private {
711 void __iomem *ioaddr;
712 struct ipg_tx *txd;
713 struct ipg_rx *rxd;
714 dma_addr_t txd_map;
715 dma_addr_t rxd_map;
716 struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
717 struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
718 unsigned int tx_current;
719 unsigned int tx_dirty;
720 unsigned int rx_current;
721 unsigned int rx_dirty;
722 bool is_jumbo;
723 struct ipg_jumbo jumbo;
724 unsigned long rxfrag_size;
725 unsigned long rxsupport_size;
726 unsigned long max_rxframe_size;
727 unsigned int rx_buf_sz;
728 struct pci_dev *pdev;
729 struct net_device *dev;
730 struct net_device_stats stats;
731 spinlock_t lock;
732 int tenmbpsmode;
733
734 u16 led_mode;
735 u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */
736
737 struct mutex mii_mutex;
738 struct mii_if_info mii_if;
739 int reset_current_tfd;
740#ifdef IPG_DEBUG
741 int RFDlistendCount;
742 int RFDListCheckedCount;
743 int EmptyRFDListCount;
744#endif
745 struct delayed_work task;
746};
747
748#endif /* __LINUX_IPG_H */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 639263d5e833..7781e80896a6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
627 627
628 /* verify the skb head is not shared */ 628 /* verify the skb head is not shared */
629 err = skb_cow_head(skb, 0); 629 err = skb_cow_head(skb, 0);
630 if (err) 630 if (err) {
631 dev_kfree_skb(skb);
631 return NETDEV_TX_OK; 632 return NETDEV_TX_OK;
633 }
632 634
633 /* locate vlan header */ 635 /* locate vlan header */
634 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 636 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
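
The fm10k hunk plugs an skb leak: returning NETDEV_TX_OK tells the core the driver consumed the buffer, so the early return on skb_cow_head() failure must dev_kfree_skb() it explicitly; before the fix the skb was abandoned, unfreed, on every such failure.
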
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e84c7f2634d3..ed622fa29dfa 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -36,7 +36,7 @@
36 36
37/* Registers */ 37/* Registers */
38#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) 38#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
39#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) 39#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
40#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) 40#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
41#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) 41#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
42#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) 42#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
@@ -62,6 +62,7 @@
62#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) 62#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
63#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) 63#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
64#define MVNETA_BASE_ADDR_ENABLE 0x2290 64#define MVNETA_BASE_ADDR_ENABLE 0x2290
65#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
65#define MVNETA_PORT_CONFIG 0x2400 66#define MVNETA_PORT_CONFIG 0x2400
66#define MVNETA_UNI_PROMISC_MODE BIT(0) 67#define MVNETA_UNI_PROMISC_MODE BIT(0)
67#define MVNETA_DEF_RXQ(q) ((q) << 1) 68#define MVNETA_DEF_RXQ(q) ((q) << 1)
@@ -159,7 +160,7 @@
159 160
160#define MVNETA_INTR_ENABLE 0x25b8 161#define MVNETA_INTR_ENABLE 0x25b8
161#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 162#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
162#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF 163#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
163 164
164#define MVNETA_RXQ_CMD 0x2680 165#define MVNETA_RXQ_CMD 0x2680
165#define MVNETA_RXQ_DISABLE_SHIFT 8 166#define MVNETA_RXQ_DISABLE_SHIFT 8
@@ -242,6 +243,7 @@
242#define MVNETA_VLAN_TAG_LEN 4 243#define MVNETA_VLAN_TAG_LEN 4
243 244
244#define MVNETA_CPU_D_CACHE_LINE_SIZE 32 245#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
246#define MVNETA_TX_CSUM_DEF_SIZE 1600
245#define MVNETA_TX_CSUM_MAX_SIZE 9800 247#define MVNETA_TX_CSUM_MAX_SIZE 9800
246#define MVNETA_ACC_MODE_EXT 1 248#define MVNETA_ACC_MODE_EXT 1
247 249
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1579 } 1581 }
1580 1582
1581 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1583 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1582 if (!skb)
1583 goto err_drop_frame;
1584 1584
1585 /* After refill old buffer has to be unmapped regardless
1586 * the skb is successfully built or not.
1587 */
1585 dma_unmap_single(dev->dev.parent, phys_addr, 1588 dma_unmap_single(dev->dev.parent, phys_addr,
1586 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1589 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1587 1590
1591 if (!skb)
1592 goto err_drop_frame;
1593
1588 rcvd_pkts++; 1594 rcvd_pkts++;
1589 rcvd_bytes += rx_bytes; 1595 rcvd_bytes += rx_bytes;
1590 1596
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
3191 } 3197 }
3192 3198
3193 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 3199 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
3200 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
3194} 3201}
3195 3202
3196/* Power up the port */ 3203/* Power up the port */
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev)
3250 char hw_mac_addr[ETH_ALEN]; 3257 char hw_mac_addr[ETH_ALEN];
3251 const char *mac_from; 3258 const char *mac_from;
3252 const char *managed; 3259 const char *managed;
3260 int tx_csum_limit;
3253 int phy_mode; 3261 int phy_mode;
3254 int err; 3262 int err;
3255 int cpu; 3263 int cpu;
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev)
3350 } 3358 }
3351 } 3359 }
3352 3360
3353 if (of_device_is_compatible(dn, "marvell,armada-370-neta")) 3361 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
3354 pp->tx_csum_limit = 1600; 3362 if (tx_csum_limit < 0 ||
3363 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
3364 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3365 dev_info(&pdev->dev,
3366 "Wrong TX csum limit in DT, set to %dB\n",
3367 MVNETA_TX_CSUM_DEF_SIZE);
3368 }
3369 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
3370 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3371 } else {
3372 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
3373 }
3374
3375 pp->tx_csum_limit = tx_csum_limit;
3355 3376
3356 pp->tx_ring_size = MVNETA_MAX_TXD; 3377 pp->tx_ring_size = MVNETA_MAX_TXD;
3357 pp->rx_ring_size = MVNETA_MAX_RXD; 3378 pp->rx_ring_size = MVNETA_MAX_RXD;
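
The mvneta probe change gives the TX checksum offload limit a three-way precedence: an explicit tx-csum-limit DT property wins (out-of-range values fall back to the 1600-byte MVNETA_TX_CSUM_DEF_SIZE with a warning), armada-370 silicon without the property keeps the 1600-byte default, and other SoCs get the 9800-byte MVNETA_TX_CSUM_MAX_SIZE. The earlier hunks correct two register definitions (hardware buffer allocation is bit 0, and the RXQ interrupt-enable mask is the low byte) and program the previously unwritten MVNETA_ACCESS_PROTECT_ENABLE register; the rx path now unmaps the old buffer even when build_skb() fails, since after refill the buffer is gone from the ring either way.
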
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 85f1b1e7e505..31c491e02e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
892 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 892 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
893 dev->caps.port_mask[i] = dev->caps.port_type[i]; 893 dev->caps.port_mask[i] = dev->caps.port_type[i];
894 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 894 dev->caps.phys_port_id[i] = func_cap.phys_port_id;
895 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, 895 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
896 &dev->caps.gid_table_len[i], 896 &dev->caps.gid_table_len[i],
897 &dev->caps.pkey_table_len[i])) 897 &dev->caps.pkey_table_len[i]);
898 if (err)
898 goto err_mem; 899 goto err_mem;
899 } 900 }
900 901
@@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
906 dev->caps.uar_page_size * dev->caps.num_uars, 907 dev->caps.uar_page_size * dev->caps.num_uars,
907 (unsigned long long) 908 (unsigned long long)
908 pci_resource_len(dev->persist->pdev, 2)); 909 pci_resource_len(dev->persist->pdev, 2));
910 err = -ENOMEM;
909 goto err_mem; 911 goto err_mem;
910 } 912 }
911 913
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34f3e5b..6fec3e993d02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4952 struct res_counter *counter; 4952 struct res_counter *counter;
4953 struct res_counter *tmp; 4953 struct res_counter *tmp;
4954 int err; 4954 int err;
4955 int index; 4955 int *counters_arr = NULL;
4956 int i, j;
4956 4957
4957 err = move_all_busy(dev, slave, RES_COUNTER); 4958 err = move_all_busy(dev, slave, RES_COUNTER);
4958 if (err) 4959 if (err)
4959 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", 4960 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4960 slave); 4961 slave);
4961 4962
4962 spin_lock_irq(mlx4_tlock(dev)); 4963 counters_arr = kmalloc_array(dev->caps.max_counters,
4963 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 4964 sizeof(*counters_arr), GFP_KERNEL);
4964 if (counter->com.owner == slave) { 4965 if (!counters_arr)
4965 index = counter->com.res_id; 4966 return;
4966 rb_erase(&counter->com.node, 4967
4967 &tracker->res_tree[RES_COUNTER]); 4968 do {
4968 list_del(&counter->com.list); 4969 i = 0;
4969 kfree(counter); 4970 j = 0;
4970 __mlx4_counter_free(dev, index); 4971 spin_lock_irq(mlx4_tlock(dev));
4972 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4973 if (counter->com.owner == slave) {
4974 counters_arr[i++] = counter->com.res_id;
4975 rb_erase(&counter->com.node,
4976 &tracker->res_tree[RES_COUNTER]);
4977 list_del(&counter->com.list);
4978 kfree(counter);
4979 }
4980 }
4981 spin_unlock_irq(mlx4_tlock(dev));
4982
4983 while (j < i) {
4984 __mlx4_counter_free(dev, counters_arr[j++]);
4971 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 4985 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4972 } 4986 }
4973 } 4987 } while (i);
4974 spin_unlock_irq(mlx4_tlock(dev)); 4988
4989 kfree(counters_arr);
4975} 4990}
4976 4991
4977static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) 4992static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
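
The resource-tracker rework is the collect-then-free pattern: resource IDs are gathered into an array under the spinlock, then the lock is dropped before __mlx4_counter_free() runs, since that routine can block and so must not be called under spin_lock_irq(). A self-contained userspace sketch of the same pattern (pthread spinlock and a stub free routine standing in for the kernel primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static int ids[4] = { 10, 11, 12, 13 };
static int nids = 4;

/* Stands in for __mlx4_counter_free(), which may block. */
static void slow_free(int id)
{
	printf("freeing %d\n", id);
}

int main(void)
{
	int snapshot[4], n = 0, j;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&lock);
	while (nids > 0)
		snapshot[n++] = ids[--nids];	/* collect under the lock */
	pthread_spin_unlock(&lock);

	for (j = 0; j < n; j++)
		slow_free(snapshot[j]);		/* free outside the lock */

	pthread_spin_destroy(&lock);
	return 0;
}
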
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62dd8c09..22e72bf1ae48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
334 334
335#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) 335#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
336 336
337enum mlx5e_dma_map_type {
338 MLX5E_DMA_MAP_SINGLE,
339 MLX5E_DMA_MAP_PAGE
340};
341
337struct mlx5e_sq_dma { 342struct mlx5e_sq_dma {
338 dma_addr_t addr; 343 dma_addr_t addr;
339 u32 size; 344 u32 size;
345 enum mlx5e_dma_map_type type;
340}; 346};
341 347
342enum { 348enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fc4d2d78cdf..1e52db32c73d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1332 return err; 1332 return err;
1333} 1333}
1334 1334
1335static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
1336 u32 tirn)
1337{
1338 void *in;
1339 int inlen;
1340 int err;
1341
1342 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1343 in = mlx5_vzalloc(inlen);
1344 if (!in)
1345 return -ENOMEM;
1346
1347 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
1348
1349 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
1350
1351 kvfree(in);
1352
1353 return err;
1354}
1355
1356static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1357{
1358 int err;
1359 int i;
1360
1361 for (i = 0; i < MLX5E_NUM_TT; i++) {
1362 err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
1363 priv->tirn[i]);
1364 if (err)
1365 return err;
1366 }
1367
1368 return 0;
1369}
1370
1335static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1371static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1336{ 1372{
1337 struct mlx5e_priv *priv = netdev_priv(netdev); 1373 struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
1376 goto err_clear_state_opened_flag; 1412 goto err_clear_state_opened_flag;
1377 } 1413 }
1378 1414
1415 err = mlx5e_refresh_tirs_self_loopback_enable(priv);
1416 if (err) {
1417 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
1418 __func__, err);
1419 goto err_close_channels;
1420 }
1421
1379 mlx5e_update_carrier(priv); 1422 mlx5e_update_carrier(priv);
1380 mlx5e_redirect_rqts(priv); 1423 mlx5e_redirect_rqts(priv);
1381 1424
@@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
1383 1426
1384 return 0; 1427 return 0;
1385 1428
1429err_close_channels:
1430 mlx5e_close_channels(priv);
1386err_clear_state_opened_flag: 1431err_clear_state_opened_flag:
1387 clear_bit(MLX5E_STATE_OPENED, &priv->state); 1432 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1388 return err; 1433 return err;
@@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
1856 1901
1857 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 1902 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
1858 1903
1904 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
1905
1859 if (new_mtu > max_mtu) { 1906 if (new_mtu > max_mtu) {
1860 netdev_err(netdev, 1907 netdev_err(netdev,
1861 "%s: Bad MTU (%d) > (%d) Max\n", 1908 "%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
1909 "Not creating net device, some required device capabilities are missing\n"); 1956 "Not creating net device, some required device capabilities are missing\n");
1910 return -ENOTSUPP; 1957 return -ENOTSUPP;
1911 } 1958 }
1959 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
1960 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
1961
1912 return 0; 1962 return 0;
1913} 1963}
1914 1964
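
Two independent mlx5 fixes: mlx5e_open_locked() now refreshes self-loopback prevention on every TIR and, if that fails, unwinds through the new err_close_channels label so freshly opened channels are not leaked; and the MTU check first converts the queried hardware maximum with MLX5E_HW2SW_MTU, which translates a hardware frame limit that includes Ethernet framing overhead into software-MTU terms before comparing it with the requested value.
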
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cd8f85a251d7..1341b1d3c421 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
61 } 61 }
62} 62}
63 63
64static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, 64static inline void mlx5e_tx_dma_unmap(struct device *pdev,
65 u32 *size) 65 struct mlx5e_sq_dma *dma)
66{ 66{
67 sq->dma_fifo_pc--; 67 switch (dma->type) {
68 *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; 68 case MLX5E_DMA_MAP_SINGLE:
69 *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; 69 dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
70} 70 break;
71 71 case MLX5E_DMA_MAP_PAGE:
72static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) 72 dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
73{ 73 break;
74 dma_addr_t addr; 74 default:
75 u32 size; 75 WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
76 int i;
77
78 for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
79 mlx5e_dma_pop_last_pushed(sq, &addr, &size);
80 dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
81 } 76 }
82} 77}
83 78
84static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, 79static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
85 u32 size) 80 dma_addr_t addr,
81 u32 size,
82 enum mlx5e_dma_map_type map_type)
86{ 83{
87 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; 84 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
88 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; 85 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
86 sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
89 sq->dma_fifo_pc++; 87 sq->dma_fifo_pc++;
90} 88}
91 89
92static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, 90static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
93 u32 *size)
94{ 91{
95 *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; 92 return &sq->dma_fifo[i & sq->dma_fifo_mask];
96 *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; 93}
94
95static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
96{
97 int i;
98
99 for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
100 struct mlx5e_sq_dma *last_pushed_dma =
101 mlx5e_dma_get(sq, --sq->dma_fifo_pc);
102
103 mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
104 }
97} 105}
98 106
99u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, 107u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
118 */ 126 */
119#define MLX5E_MIN_INLINE ETH_HLEN 127#define MLX5E_MIN_INLINE ETH_HLEN
120 128
121 if (bf && (skb_headlen(skb) <= sq->max_inline)) 129 if (bf) {
122 return skb_headlen(skb); 130 u16 ihs = skb_headlen(skb);
131
132 if (skb_vlan_tag_present(skb))
133 ihs += VLAN_HLEN;
134
135 if (ihs <= sq->max_inline)
136 return skb_headlen(skb);
137 }
123 138
124 return MLX5E_MIN_INLINE; 139 return MLX5E_MIN_INLINE;
125} 140}
@@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
218 dseg->lkey = sq->mkey_be; 233 dseg->lkey = sq->mkey_be;
219 dseg->byte_count = cpu_to_be32(headlen); 234 dseg->byte_count = cpu_to_be32(headlen);
220 235
221 mlx5e_dma_push(sq, dma_addr, headlen); 236 mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
222 MLX5E_TX_SKB_CB(skb)->num_dma++; 237 MLX5E_TX_SKB_CB(skb)->num_dma++;
223 238
224 dseg++; 239 dseg++;
@@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
237 dseg->lkey = sq->mkey_be; 252 dseg->lkey = sq->mkey_be;
238 dseg->byte_count = cpu_to_be32(fsz); 253 dseg->byte_count = cpu_to_be32(fsz);
239 254
240 mlx5e_dma_push(sq, dma_addr, fsz); 255 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
241 MLX5E_TX_SKB_CB(skb)->num_dma++; 256 MLX5E_TX_SKB_CB(skb)->num_dma++;
242 257
243 dseg++; 258 dseg++;
@@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
353 } 368 }
354 369
355 for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { 370 for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
356 dma_addr_t addr; 371 struct mlx5e_sq_dma *dma =
357 u32 size; 372 mlx5e_dma_get(sq, dma_fifo_cc++);
358 373
359 mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); 374 mlx5e_tx_dma_unmap(sq->pdev, dma);
360 dma_fifo_cc++;
361 dma_unmap_single(sq->pdev, addr, size,
362 DMA_TO_DEVICE);
363 } 375 }
364 376
365 npkts++; 377 npkts++;
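
The en_tx.c rework follows from a DMA-API rule: an unmap must match its map, so buffers mapped with dma_map_single() need dma_unmap_single() and page fragments need dma_unmap_page(). Recording an mlx5e_dma_map_type with every FIFO entry lets both the completion path and the WQE error path dispatch to the right variant through the shared mlx5e_tx_dma_unmap() helper. The inline-header change likewise accounts for a pending VLAN insertion, checking headlen plus VLAN_HLEN against max_inline instead of headlen alone.
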
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b159ef8303cc..057665180f13 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1326 /* Get platform resources */ 1326 /* Get platform resources */
1327 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1327 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1328 irq = platform_get_irq(pdev, 0); 1328 irq = platform_get_irq(pdev, 0);
1329 if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { 1329 if (!res || irq < 0) {
1330 dev_err(&pdev->dev, "error getting resources.\n"); 1330 dev_err(&pdev->dev, "error getting resources.\n");
1331 ret = -ENXIO; 1331 ret = -ENXIO;
1332 goto err_exit; 1332 goto err_exit;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b4f21232019a..79ef799f88ab 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7429,15 +7429,15 @@ process_pkt:
7429 7429
7430 rtl8169_rx_vlan_tag(desc, skb); 7430 rtl8169_rx_vlan_tag(desc, skb);
7431 7431
7432 if (skb->pkt_type == PACKET_MULTICAST)
7433 dev->stats.multicast++;
7434
7432 napi_gro_receive(&tp->napi, skb); 7435 napi_gro_receive(&tp->napi, skb);
7433 7436
7434 u64_stats_update_begin(&tp->rx_stats.syncp); 7437 u64_stats_update_begin(&tp->rx_stats.syncp);
7435 tp->rx_stats.packets++; 7438 tp->rx_stats.packets++;
7436 tp->rx_stats.bytes += pkt_size; 7439 tp->rx_stats.bytes += pkt_size;
7437 u64_stats_update_end(&tp->rx_stats.syncp); 7440 u64_stats_update_end(&tp->rx_stats.syncp);
7438
7439 if (skb->pkt_type == PACKET_MULTICAST)
7440 dev->stats.multicast++;
7441 } 7441 }
7442release_descriptor: 7442release_descriptor:
7443 desc->opts2 = 0; 7443 desc->opts2 = 0;
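
The r8169 reorder matters because napi_gro_receive() hands the skb to the stack, which may free it immediately; reading skb->pkt_type afterwards to bump the multicast counter was a potential use-after-free, so the counter is now updated before the skb is passed up.
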
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aa7b2083cb53..ed5da4d47668 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev)
408 /* Interrupt enable: */ 408 /* Interrupt enable: */
409 /* Frame receive */ 409 /* Frame receive */
410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); 410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
411 /* Receive FIFO full warning */
412 ravb_write(ndev, RIC1_RFWE, RIC1);
413 /* Receive FIFO full error, descriptor empty */ 411 /* Receive FIFO full error, descriptor empty */
414 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); 412 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
415 /* Frame transmitted, timestamp FIFO updated */ 413 /* Frame transmitted, timestamp FIFO updated */
@@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
733 ((tis & tic) & BIT(q))) { 731 ((tis & tic) & BIT(q))) {
734 if (napi_schedule_prep(&priv->napi[q])) { 732 if (napi_schedule_prep(&priv->napi[q])) {
735 /* Mask RX and TX interrupts */ 733 /* Mask RX and TX interrupts */
736 ravb_write(ndev, ric0 & ~BIT(q), RIC0); 734 ric0 &= ~BIT(q);
737 ravb_write(ndev, tic & ~BIT(q), TIC); 735 tic &= ~BIT(q);
736 ravb_write(ndev, ric0, RIC0);
737 ravb_write(ndev, tic, TIC);
738 __napi_schedule(&priv->napi[q]); 738 __napi_schedule(&priv->napi[q]);
739 } else { 739 } else {
740 netdev_warn(ndev, 740 netdev_warn(ndev,
@@ -1225,7 +1225,7 @@ static int ravb_open(struct net_device *ndev)
1225 /* Device init */ 1225 /* Device init */
1226 error = ravb_dmac_init(ndev); 1226 error = ravb_dmac_init(ndev);
1227 if (error) 1227 if (error)
1228 goto out_free_irq; 1228 goto out_free_irq2;
1229 ravb_emac_init(ndev); 1229 ravb_emac_init(ndev);
1230 1230
1231 /* Initialise PTP Clock driver */ 1231 /* Initialise PTP Clock driver */
@@ -1243,9 +1243,11 @@ static int ravb_open(struct net_device *ndev)
1243out_ptp_stop: 1243out_ptp_stop:
1244 /* Stop PTP Clock driver */ 1244 /* Stop PTP Clock driver */
1245 ravb_ptp_stop(ndev); 1245 ravb_ptp_stop(ndev);
1246out_free_irq2:
1247 if (priv->chip_id == RCAR_GEN3)
1248 free_irq(priv->emac_irq, ndev);
1246out_free_irq: 1249out_free_irq:
1247 free_irq(ndev->irq, ndev); 1250 free_irq(ndev->irq, ndev);
1248 free_irq(priv->emac_irq, ndev);
1249out_napi_off: 1251out_napi_off:
1250 napi_disable(&priv->napi[RAVB_NC]); 1252 napi_disable(&priv->napi[RAVB_NC]);
1251 napi_disable(&priv->napi[RAVB_BE]); 1253 napi_disable(&priv->napi[RAVB_BE]);
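
Two ravb fixes: the interrupt handler now clears the masked queue's bit in its cached ric0/tic copies before writing them back, so masking one queue can no longer write stale values that re-enable a queue masked in an earlier iteration; and ravb_open() gains an out_free_irq2 label so that a ravb_dmac_init() failure frees the EMAC interrupt only on R-Car Gen3, where it was actually requested.
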
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d288f1c928de..a3c42a376741 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3422,7 +3422,7 @@ out:
3422 * with our request for slot reset the mmio_enabled callback will never be 3422 * with our request for slot reset the mmio_enabled callback will never be
3423 * called, and the link_reset callback is not used by AER or EEH mechanisms. 3423 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3424 */ 3424 */
3425static struct pci_error_handlers efx_err_handlers = { 3425static const struct pci_error_handlers efx_err_handlers = {
3426 .error_detected = efx_io_error_detected, 3426 .error_detected = efx_io_error_detected,
3427 .slot_reset = efx_io_slot_reset, 3427 .slot_reset = efx_io_slot_reset,
3428 .resume = efx_io_resume, 3428 .resume = efx_io_resume,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c860c9007e49..219a99b7a631 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
809 809
810static int smsc911x_phy_reset(struct smsc911x_data *pdata) 810static int smsc911x_phy_reset(struct smsc911x_data *pdata)
811{ 811{
812 struct phy_device *phy_dev = pdata->phy_dev;
813 unsigned int temp; 812 unsigned int temp;
814 unsigned int i = 100000; 813 unsigned int i = 100000;
815 814
816 BUG_ON(!phy_dev); 815 temp = smsc911x_reg_read(pdata, PMT_CTRL);
817 BUG_ON(!phy_dev->bus); 816 smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
818
819 SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
820 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
821 do { 817 do {
822 msleep(1); 818 msleep(1);
823 temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, 819 temp = smsc911x_reg_read(pdata, PMT_CTRL);
824 MII_BMCR); 820 } while ((i--) && (temp & PMT_CTRL_PHY_RST_));
825 } while ((i--) && (temp & BMCR_RESET));
826 821
827 if (temp & BMCR_RESET) { 822 if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
828 SMSC_WARN(pdata, hw, "PHY reset failed to complete"); 823 SMSC_WARN(pdata, hw, "PHY reset failed to complete");
829 return -EIO; 824 return -EIO;
830 } 825 }
@@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev)
2296 } 2291 }
2297 2292
2298 /* Reset the LAN911x */ 2293 /* Reset the LAN911x */
2299 if (smsc911x_soft_reset(pdata)) 2294 if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
2300 return -ENODEV; 2295 return -ENODEV;
2301 2296
2302 dev->flags |= IFF_MULTICAST; 2297 dev->flags |= IFF_MULTICAST;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9d89bdbf029f..82de68b1a452 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
337 QSGMII_PHY_RX_SIGNAL_DETECT_EN | 337 QSGMII_PHY_RX_SIGNAL_DETECT_EN |
338 QSGMII_PHY_TX_DRIVER_EN | 338 QSGMII_PHY_TX_DRIVER_EN |
339 QSGMII_PHY_QSGMII_EN | 339 QSGMII_PHY_QSGMII_EN |
340 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | 340 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
341 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | 341 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
342 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | 342 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
343 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | 343 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
344 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); 344 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
345 } 345 }
346 346
347 plat_dat->has_gmac = true; 347 plat_dat->has_gmac = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 7f6f4a4fcc70..58c05acc2aab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
299 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { 299 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
300 const char *rs; 300 const char *rs;
301 301
302 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
303
302 err = of_property_read_string(np, "st,tx-retime-src", &rs); 304 err = of_property_read_string(np, "st,tx-retime-src", &rs);
303 if (err < 0) { 305 if (err < 0) {
304 dev_warn(dev, "Use internal clock source\n"); 306 dev_warn(dev, "Use internal clock source\n");
305 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; 307 } else {
306 } else if (!strcasecmp(rs, "clk_125")) { 308 if (!strcasecmp(rs, "clk_125"))
307 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; 309 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
308 } else if (!strcasecmp(rs, "txclk")) { 310 else if (!strcasecmp(rs, "txclk"))
309 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; 311 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
310 } 312 }
311
312 dwmac->speed = SPEED_1000; 313 dwmac->speed = SPEED_1000;
313 } 314 }
314 315
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 64d8aa4e0cad..3c6549aee11d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
185 priv->clk_csr = STMMAC_CSR_100_150M; 185 priv->clk_csr = STMMAC_CSR_100_150M;
186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
187 priv->clk_csr = STMMAC_CSR_150_250M; 187 priv->clk_csr = STMMAC_CSR_150_250M;
188 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 188 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
189 priv->clk_csr = STMMAC_CSR_250_300M; 189 priv->clk_csr = STMMAC_CSR_250_300M;
190 } 190 }
191} 191}
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2232 2232
2233 frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 2233 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2234 2234
2235 /* check if frame_len fits the preallocated memory */
2236 if (frame_len > priv->dma_buf_sz) {
2237 priv->dev->stats.rx_length_errors++;
2238 break;
2239 }
2240
2235 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 2241 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2236 * Type frames (LLC/LLC-SNAP) 2242 * Type frames (LLC/LLC-SNAP)
2237 */ 2243 */
@@ -3102,6 +3108,7 @@ int stmmac_resume(struct net_device *ndev)
3102 init_dma_desc_rings(ndev, GFP_ATOMIC); 3108 init_dma_desc_rings(ndev, GFP_ATOMIC);
3103 stmmac_hw_setup(ndev, false); 3109 stmmac_hw_setup(ndev, false);
3104 stmmac_init_tx_coalesce(priv); 3110 stmmac_init_tx_coalesce(priv);
3111 stmmac_set_rx_mode(ndev);
3105 3112
3106 napi_enable(&priv->napi); 3113 napi_enable(&priv->napi);
3107 3114
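
The stmmac RX fix validates the descriptor-reported frame_len against the preallocated dma_buf_sz, counting an rx_length_error and breaking out rather than reading past the buffer when hardware reports a bogus length. stmmac_resume() additionally reprograms the RX filter via stmmac_set_rx_mode(), since the filter configuration is lost with the rest of the MAC state across suspend, and the CSR clock bucket for 250-300 MHz now includes its 300 MHz upper bound.
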
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ebf6abc4853f..bba670c42e37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus)
138 138
139#ifdef CONFIG_OF 139#ifdef CONFIG_OF
140 if (priv->device->of_node) { 140 if (priv->device->of_node) {
141 int reset_gpio, active_low;
142 141
143 if (data->reset_gpio < 0) { 142 if (data->reset_gpio < 0) {
144 struct device_node *np = priv->device->of_node; 143 struct device_node *np = priv->device->of_node;
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus)
154 "snps,reset-active-low"); 153 "snps,reset-active-low");
155 of_property_read_u32_array(np, 154 of_property_read_u32_array(np,
156 "snps,reset-delays-us", data->delays, 3); 155 "snps,reset-delays-us", data->delays, 3);
157 }
158 156
159 reset_gpio = data->reset_gpio; 157 if (gpio_request(data->reset_gpio, "mdio-reset"))
160 active_low = data->active_low; 158 return 0;
159 }
161 160
162 if (!gpio_request(reset_gpio, "mdio-reset")) { 161 gpio_direction_output(data->reset_gpio,
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0); 162 data->active_low ? 1 : 0);
164 if (data->delays[0]) 163 if (data->delays[0])
165 msleep(DIV_ROUND_UP(data->delays[0], 1000)); 164 msleep(DIV_ROUND_UP(data->delays[0], 1000));
166 165
167 gpio_set_value(reset_gpio, active_low ? 0 : 1); 166 gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1);
168 if (data->delays[1]) 167 if (data->delays[1])
169 msleep(DIV_ROUND_UP(data->delays[1], 1000)); 168 msleep(DIV_ROUND_UP(data->delays[1], 1000));
170 169
171 gpio_set_value(reset_gpio, active_low ? 1 : 0); 170 gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0);
172 if (data->delays[2]) 171 if (data->delays[2])
173 msleep(DIV_ROUND_UP(data->delays[2], 1000)); 172 msleep(DIV_ROUND_UP(data->delays[2], 1000));
174 }
175 } 173 }
176#endif 174#endif
177 175
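
After the stmmac_mdio_reset() rework above, the reset GPIO is requested once and kept, and the assert/deassert pulse runs on every reset rather than only after a fresh gpio_request(). A sketch of the pulse alone, assuming delays in microseconds as "snps,reset-delays-us" provides them:

#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/kernel.h>

static void mdio_reset_pulse(unsigned int gpio, bool active_low,
                             const u32 delays[3])
{
        gpio_direction_output(gpio, active_low ? 1 : 0);
        if (delays[0])
                msleep(DIV_ROUND_UP(delays[0], 1000));

        gpio_set_value(gpio, active_low ? 0 : 1);       /* assert reset */
        if (delays[1])
                msleep(DIV_ROUND_UP(delays[1], 1000));

        gpio_set_value(gpio, active_low ? 1 : 0);       /* release reset */
        if (delays[2])
                msleep(DIV_ROUND_UP(delays[2], 1000));
}
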
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index c08be62bceba..1562ab4151e1 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
78 78
79int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) 79int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
80{ 80{
81 if (of_machine_is_compatible("ti,dm8148"))
82 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
83
81 if (of_machine_is_compatible("ti,am33xx")) 84 if (of_machine_is_compatible("ti,am33xx"))
82 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); 85 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
83 86
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ae68afd50a15..f38696ceee74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
345*/ 345*/
346VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); 346VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
347 347
348#define VAL_PKT_LEN_DEF 0
349/* ValPktLen[] is used for setting the checksum offload ability of NIC.
350 0: Receive frame with invalid layer 2 length (Default)
351 1: Drop frame with invalid layer 2 length
352*/
353VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
354
355#define WOL_OPT_DEF 0 348#define WOL_OPT_DEF 0
356#define WOL_OPT_MIN 0 349#define WOL_OPT_MIN 0
357#define WOL_OPT_MAX 7 350#define WOL_OPT_MAX 7
@@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index,
494 487
495 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); 488 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
496 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 489 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
497 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
498 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 490 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
499 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 491 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
500 opts->numrx = (opts->numrx & ~3); 492 opts->numrx = (opts->numrx & ~3);
@@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2055 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 2047 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2056 struct sk_buff *skb; 2048 struct sk_buff *skb;
2057 2049
2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2050 if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); 2051 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2052 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2060 stats->rx_length_errors++; 2053 stats->rx_length_errors++;
2061 return -EINVAL; 2054 return -EINVAL;
2062 } 2055 }
@@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2069 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, 2062 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2070 vptr->rx.buf_sz, DMA_FROM_DEVICE); 2063 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2071 2064
2072 /*
2073 * Drop frame not meeting IEEE 802.3
2074 */
2075
2076 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2077 if (rd->rdesc0.RSR & RSR_RL) {
2078 stats->rx_length_errors++;
2079 return -EINVAL;
2080 }
2081 }
2082
2083 velocity_rx_csum(rd, skb); 2065 velocity_rx_csum(rd, skb);
2084 2066
2085 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { 2067 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
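
With the ValPktLen module parameter removed, via-velocity now drops frames flagged RSR_RL (invalid 802.3 length) unconditionally, folded into the same unlikely() test as the multi-descriptor case. A sketch of the consolidated predicate; the RSR_* bit values below are placeholders, the real definitions live in via-velocity.h:

#include <linux/bitops.h>
#include <linux/types.h>

#define RSR_RL  BIT(4)  /* placeholder: received-length error */
#define RSR_STP BIT(5)  /* placeholder: start of packet */
#define RSR_EDP BIT(6)  /* placeholder: end of packet */

static inline bool rx_desc_rejected(u16 rsr)
{
        /* one test covers both spanning frames and bad-length frames */
        return rsr & (RSR_STP | RSR_EDP | RSR_RL);
}
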
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index bb8b5304d851..b103adb8d62e 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
599 FJES_CMD_REQ_RES_CODE_BUSY) && 599 FJES_CMD_REQ_RES_CODE_BUSY) &&
600 (timeout > 0)) { 600 (timeout > 0)) {
601 msleep(200 + hw->my_epid * 20); 601 msleep(200 + hw->my_epid * 20);
602 timeout -= (200 + hw->my_epid * 20); 602 timeout -= (200 + hw->my_epid * 20);
603 603
604 res_buf->unshare_buffer.length = 0; 604 res_buf->unshare_buffer.length = 0;
605 res_buf->unshare_buffer.code = 0; 605 res_buf->unshare_buffer.code = 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d50887e3df6d..8c48bb2a94ea 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,7 +254,7 @@ acct:
254 } 254 }
255} 255}
256 256
257static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, 257static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
258 bool local) 258 bool local)
259{ 259{
260 struct ipvl_dev *ipvlan = addr->master; 260 struct ipvl_dev *ipvlan = addr->master;
@@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
262 unsigned int len; 262 unsigned int len;
263 rx_handler_result_t ret = RX_HANDLER_CONSUMED; 263 rx_handler_result_t ret = RX_HANDLER_CONSUMED;
264 bool success = false; 264 bool success = false;
265 struct sk_buff *skb = *pskb;
265 266
266 len = skb->len + ETH_HLEN; 267 len = skb->len + ETH_HLEN;
267 if (unlikely(!(dev->flags & IFF_UP))) { 268 if (unlikely(!(dev->flags & IFF_UP))) {
@@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
273 if (!skb) 274 if (!skb)
274 goto out; 275 goto out;
275 276
277 *pskb = skb;
276 skb->dev = dev; 278 skb->dev = dev;
277 skb->pkt_type = PACKET_HOST; 279 skb->pkt_type = PACKET_HOST;
278 280
@@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
486 488
487 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 489 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
488 if (addr) 490 if (addr)
489 return ipvlan_rcv_frame(addr, skb, true); 491 return ipvlan_rcv_frame(addr, &skb, true);
490 492
491out: 493out:
492 skb->dev = ipvlan->phy_dev; 494 skb->dev = ipvlan->phy_dev;
@@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
506 if (lyr3h) { 508 if (lyr3h) {
507 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 509 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
508 if (addr) 510 if (addr)
509 return ipvlan_rcv_frame(addr, skb, true); 511 return ipvlan_rcv_frame(addr, &skb, true);
510 } 512 }
511 skb = skb_share_check(skb, GFP_ATOMIC); 513 skb = skb_share_check(skb, GFP_ATOMIC);
512 if (!skb) 514 if (!skb)
@@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
589 591
590 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 592 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
591 if (addr) 593 if (addr)
592 ret = ipvlan_rcv_frame(addr, skb, false); 594 ret = ipvlan_rcv_frame(addr, pskb, false);
593 595
594out: 596out:
595 return ret; 597 return ret;
@@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
626 628
627 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 629 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
628 if (addr) 630 if (addr)
629 ret = ipvlan_rcv_frame(addr, skb, false); 631 ret = ipvlan_rcv_frame(addr, pskb, false);
630 } 632 }
631 633
632 return ret; 634 return ret;
@@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
651 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", 653 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
652 port->mode); 654 port->mode);
653 kfree_skb(skb); 655 kfree_skb(skb);
654 return NET_RX_DROP; 656 return RX_HANDLER_CONSUMED;
655} 657}
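
Two distinct fixes in the ipvlan hunk above: ipvlan_rcv_frame() takes struct sk_buff ** because skb_share_check() inside it may return a different skb, which must be written back so the caller never touches a freed pointer; and the fallback path returns RX_HANDLER_CONSUMED, a valid rx_handler_result_t, instead of NET_RX_DROP. A sketch of the double-pointer idiom:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static rx_handler_result_t rcv_frame_sketch(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        skb = skb_share_check(skb, GFP_ATOMIC); /* may clone and free */
        if (!skb)
                return RX_HANDLER_CONSUMED;     /* original already gone */

        *pskb = skb;    /* propagate the possibly-new skb to the caller */
        /* ... deliver to the slave device ... */
        return RX_HANDLER_CONSUMED;
}
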
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 86f6c6292c27..06c8bfeaccd6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); 415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
416 if (!skb) 416 if (!skb)
417 return RX_HANDLER_CONSUMED; 417 return RX_HANDLER_CONSUMED;
418 *pskb = skb;
418 eth = eth_hdr(skb); 419 eth = eth_hdr(skb);
419 macvlan_forward_source(skb, port, eth->h_source); 420 macvlan_forward_source(skb, port, eth->h_source);
420 src = macvlan_hash_lookup(port, eth->h_source); 421 src = macvlan_hash_lookup(port, eth->h_source);
@@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
456 goto out; 457 goto out;
457 } 458 }
458 459
460 *pskb = skb;
459 skb->dev = dev; 461 skb->dev = dev;
460 skb->pkt_type = PACKET_HOST; 462 skb->pkt_type = PACKET_HOST;
461 463
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 54036ae0a388..0fc521941c71 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk)
498 wait_queue_head_t *wqueue; 498 wait_queue_head_t *wqueue;
499 499
500 if (!sock_writeable(sk) || 500 if (!sock_writeable(sk) ||
501 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 501 !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
502 return; 502 return;
503 503
504 wqueue = sk_sleep(sk); 504 wqueue = sk_sleep(sk);
@@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
585 mask |= POLLIN | POLLRDNORM; 585 mask |= POLLIN | POLLRDNORM;
586 586
587 if (sock_writeable(&q->sk) || 587 if (sock_writeable(&q->sk) ||
588 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && 588 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
589 sock_writeable(&q->sk))) 589 sock_writeable(&q->sk)))
590 mask |= POLLOUT | POLLWRNORM; 590 mask |= POLLOUT | POLLWRNORM;
591 591
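
The macvtap changes track this cycle's rename of the socket async bits (SOCK_ASYNC_NOSPACE became SOCKWQ_ASYNC_NOSPACE as these flags migrate toward struct socket_wq); the surrounding logic is the standard poll-side race closure: arm the NOSPACE bit, then re-test writeability so a wakeup landing between the two checks is not lost. A sketch of that idiom, assuming sk->sk_socket is set as in the driver paths here (tun.c below gets the same treatment):

#include <net/sock.h>
#include <linux/poll.h>

static unsigned int poll_out_mask_sketch(struct sock *sk)
{
        if (sock_writeable(sk) ||
            (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
             sock_writeable(sk)))       /* re-check: close the wakeup race */
                return POLLOUT | POLLWRNORM;
        return 0;
}
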
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fabf11d32d27..2d020a3ec0b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = {
308 .flags = PHY_HAS_INTERRUPT, 308 .flags = PHY_HAS_INTERRUPT,
309 .config_aneg = genphy_config_aneg, 309 .config_aneg = genphy_config_aneg,
310 .read_status = genphy_read_status, 310 .read_status = genphy_read_status,
311 .ack_interrupt = at803x_ack_interrupt,
312 .config_intr = at803x_config_intr,
311 .driver = { 313 .driver = {
312 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
313 }, 315 },
@@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = {
327 .flags = PHY_HAS_INTERRUPT, 329 .flags = PHY_HAS_INTERRUPT,
328 .config_aneg = genphy_config_aneg, 330 .config_aneg = genphy_config_aneg,
329 .read_status = genphy_read_status, 331 .read_status = genphy_read_status,
332 .ack_interrupt = at803x_ack_interrupt,
333 .config_intr = at803x_config_intr,
330 .driver = { 334 .driver = {
331 .owner = THIS_MODULE, 335 .owner = THIS_MODULE,
332 }, 336 },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 07a6119121c3..3ce5d9514623 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
614 { PHY_ID_BCM5461, 0xfffffff0 }, 614 { PHY_ID_BCM5461, 0xfffffff0 },
615 { PHY_ID_BCM54616S, 0xfffffff0 }, 615 { PHY_ID_BCM54616S, 0xfffffff0 },
616 { PHY_ID_BCM5464, 0xfffffff0 }, 616 { PHY_ID_BCM5464, 0xfffffff0 },
617 { PHY_ID_BCM5482, 0xfffffff0 }, 617 { PHY_ID_BCM5481, 0xfffffff0 },
618 { PHY_ID_BCM5482, 0xfffffff0 }, 618 { PHY_ID_BCM5482, 0xfffffff0 },
619 { PHY_ID_BCM50610, 0xfffffff0 }, 619 { PHY_ID_BCM50610, 0xfffffff0 },
620 { PHY_ID_BCM50610M, 0xfffffff0 }, 620 { PHY_ID_BCM50610M, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5de8d5827536..0240552b50f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = {
1154 .driver = { .owner = THIS_MODULE }, 1154 .driver = { .owner = THIS_MODULE },
1155 }, 1155 },
1156 { 1156 {
1157 .phy_id = MARVELL_PHY_ID_88E1540,
1158 .phy_id_mask = MARVELL_PHY_ID_MASK,
1159 .name = "Marvell 88E1540",
1160 .features = PHY_GBIT_FEATURES,
1161 .flags = PHY_HAS_INTERRUPT,
1162 .config_aneg = &m88e1510_config_aneg,
1163 .read_status = &marvell_read_status,
1164 .ack_interrupt = &marvell_ack_interrupt,
1165 .config_intr = &marvell_config_intr,
1166 .did_interrupt = &m88e1121_did_interrupt,
1167 .resume = &genphy_resume,
1168 .suspend = &genphy_suspend,
1169 .driver = { .owner = THIS_MODULE },
1170 },
1171 {
1157 .phy_id = MARVELL_PHY_ID_88E3016, 1172 .phy_id = MARVELL_PHY_ID_88E3016,
1158 .phy_id_mask = MARVELL_PHY_ID_MASK, 1173 .phy_id_mask = MARVELL_PHY_ID_MASK,
1159 .name = "Marvell 88E3016", 1174 .name = "Marvell 88E3016",
@@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
1186 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, 1201 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
1187 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, 1202 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
1188 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, 1203 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
1204 { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
1189 { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, 1205 { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
1190 { } 1206 { }
1191}; 1207};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index adb48abafc87..47cd306dbb3c 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
448 mdiobus_write(phydev->bus, mii_data->phy_id, 448 mdiobus_write(phydev->bus, mii_data->phy_id,
449 mii_data->reg_num, val); 449 mii_data->reg_num, val);
450 450
451 if (mii_data->reg_num == MII_BMCR && 451 if (mii_data->phy_id == phydev->addr &&
452 mii_data->reg_num == MII_BMCR &&
452 val & BMCR_RESET) 453 val & BMCR_RESET)
453 return phy_init_hw(phydev); 454 return phy_init_hw(phydev);
454 455
@@ -863,6 +864,9 @@ void phy_state_machine(struct work_struct *work)
863 needs_aneg = true; 864 needs_aneg = true;
864 break; 865 break;
865 case PHY_NOLINK: 866 case PHY_NOLINK:
867 if (phy_interrupt_is_valid(phydev))
868 break;
869
866 err = phy_read_status(phydev); 870 err = phy_read_status(phydev);
867 if (err) 871 if (err)
868 break; 872 break;
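
Two guards in the phy.c hunks above: SIOCSMIIREG now calls phy_init_hw() only when the register write targets this PHY's own bus address (a BMCR reset aimed at another PHY on the shared MDIO bus must not reprogram ours), and the state machine skips the PHY_NOLINK poll when a valid interrupt line will signal link-up on its own. A sketch of the ioctl predicate:

#include <linux/mii.h>
#include <linux/phy.h>

static bool mii_write_resets_this_phy(struct phy_device *phydev,
                                      struct mii_ioctl_data *mii, u16 val)
{
        return mii->phy_id == phydev->addr &&   /* our PHY, not a bus peer */
               mii->reg_num == MII_BMCR &&
               (val & BMCR_RESET);
}
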
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 76cad712ddb2..dd295dbaa074 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,6 +66,7 @@
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8601 0x00070420
69#define PHY_ID_VSC8662 0x00070660 70#define PHY_ID_VSC8662 0x00070660
70#define PHY_ID_VSC8221 0x000fc550 71#define PHY_ID_VSC8221 0x000fc550
71#define PHY_ID_VSC8211 0x000fc4b0 72#define PHY_ID_VSC8211 0x000fc4b0
@@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
133 (phydev->drv->phy_id == PHY_ID_VSC8234 || 134 (phydev->drv->phy_id == PHY_ID_VSC8234 ||
134 phydev->drv->phy_id == PHY_ID_VSC8244 || 135 phydev->drv->phy_id == PHY_ID_VSC8244 ||
135 phydev->drv->phy_id == PHY_ID_VSC8514 || 136 phydev->drv->phy_id == PHY_ID_VSC8514 ||
136 phydev->drv->phy_id == PHY_ID_VSC8574) ? 137 phydev->drv->phy_id == PHY_ID_VSC8574 ||
138 phydev->drv->phy_id == PHY_ID_VSC8601) ?
137 MII_VSC8244_IMASK_MASK : 139 MII_VSC8244_IMASK_MASK :
138 MII_VSC8221_IMASK_MASK); 140 MII_VSC8221_IMASK_MASK);
139 else { 141 else {
@@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = {
272 .config_intr = &vsc82xx_config_intr, 274 .config_intr = &vsc82xx_config_intr,
273 .driver = { .owner = THIS_MODULE,}, 275 .driver = { .owner = THIS_MODULE,},
274}, { 276}, {
277 .phy_id = PHY_ID_VSC8601,
278 .name = "Vitesse VSC8601",
279 .phy_id_mask = 0x000ffff0,
280 .features = PHY_GBIT_FEATURES,
281 .flags = PHY_HAS_INTERRUPT,
282 .config_init = &genphy_config_init,
283 .config_aneg = &genphy_config_aneg,
284 .read_status = &genphy_read_status,
285 .ack_interrupt = &vsc824x_ack_interrupt,
286 .config_intr = &vsc82xx_config_intr,
287 .driver = { .owner = THIS_MODULE,},
288}, {
275 .phy_id = PHY_ID_VSC8662, 289 .phy_id = PHY_ID_VSC8662,
276 .name = "Vitesse VSC8662", 290 .name = "Vitesse VSC8662",
277 .phy_id_mask = 0x000ffff0, 291 .phy_id_mask = 0x000ffff0,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b1878faea397..f0db770e8b2f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1040 mask |= POLLIN | POLLRDNORM; 1040 mask |= POLLIN | POLLRDNORM;
1041 1041
1042 if (sock_writeable(sk) || 1042 if (sock_writeable(sk) ||
1043 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1043 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1044 sock_writeable(sk))) 1044 sock_writeable(sk)))
1045 mask |= POLLOUT | POLLWRNORM; 1045 mask |= POLLOUT | POLLWRNORM;
1046 1046
@@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk)
1488 if (!sock_writeable(sk)) 1488 if (!sock_writeable(sk))
1489 return; 1489 return;
1490 1490
1491 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 1491 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
1492 return; 1492 return;
1493 1493
1494 wqueue = sk_sleep(sk); 1494 wqueue = sk_sleep(sk);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c78d3cb1b464..3da70bf9936a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -696,6 +696,11 @@ static const struct usb_device_id products[] = {
696 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 696 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
697 .driver_info = (kernel_ulong_t) &wwan_info, 697 .driver_info = (kernel_ulong_t) &wwan_info,
698}, { 698}, {
699 /* Dell DW5580 modules */
700 USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
701 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
702 .driver_info = (kernel_ulong_t)&wwan_info,
703}, {
699 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 704 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
700 USB_CDC_PROTO_NONE), 705 USB_CDC_PROTO_NONE),
701 .driver_info = (unsigned long) &cdc_info, 706 .driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a187f08113ec..3b1ba8237768 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
691 691
692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) 692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
693{ 693{
694 const struct usb_cdc_union_desc *union_desc = NULL;
695 struct cdc_ncm_ctx *ctx; 694 struct cdc_ncm_ctx *ctx;
696 struct usb_driver *driver; 695 struct usb_driver *driver;
697 u8 *buf; 696 u8 *buf;
@@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
725 /* parse through descriptors associated with control interface */ 724 /* parse through descriptors associated with control interface */
726 cdc_parse_cdc_header(&hdr, intf, buf, len); 725 cdc_parse_cdc_header(&hdr, intf, buf, len);
727 726
728 ctx->data = usb_ifnum_to_if(dev->udev, 727 if (hdr.usb_cdc_union_desc)
729 hdr.usb_cdc_union_desc->bSlaveInterface0); 728 ctx->data = usb_ifnum_to_if(dev->udev,
729 hdr.usb_cdc_union_desc->bSlaveInterface0);
730 ctx->ether_desc = hdr.usb_cdc_ether_desc; 730 ctx->ether_desc = hdr.usb_cdc_ether_desc;
731 ctx->func_desc = hdr.usb_cdc_ncm_desc; 731 ctx->func_desc = hdr.usb_cdc_ncm_desc;
732 ctx->mbim_desc = hdr.usb_cdc_mbim_desc; 732 ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
733 ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; 733 ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;
734 734
735 /* some buggy devices have an IAD but no CDC Union */ 735 /* some buggy devices have an IAD but no CDC Union */
736 if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { 736 if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
737 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); 737 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
738 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); 738 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
739 } 739 }
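
cdc_ncm_bind_common() dereferenced hdr.usb_cdc_union_desc without a NULL check (an oops on devices lacking the descriptor), while the leftover local union_desc, never assigned after the header-parsing rework, let the IAD fallback fire even when a CDC Union was present. The fix keys both paths on the parsed pointer. A sketch of the guarded selection, assuming the usb_cdc_parsed_header layout from linux/usb/cdc.h:

#include <linux/usb.h>
#include <linux/usb/cdc.h>

static struct usb_interface *
pick_data_iface(struct usb_device *udev, struct usb_interface *intf,
                const struct usb_cdc_parsed_header *hdr)
{
        if (hdr->usb_cdc_union_desc)    /* the descriptor is optional */
                return usb_ifnum_to_if(udev,
                        hdr->usb_cdc_union_desc->bSlaveInterface0);

        /* buggy devices: no CDC Union, but an IAD grouping two interfaces */
        if (intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2)
                return usb_ifnum_to_if(udev,
                        intf->cur_altsetting->desc.bInterfaceNumber + 1);

        return NULL;
}
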
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34799eaace41..9a5be8b85186 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -725,6 +725,7 @@ static const struct usb_device_id products[] = {
725 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 725 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
726 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 726 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
727 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 727 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
728 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
728 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 729 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
729 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 730 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
730 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ 731 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 46f4caddccbe..417903715437 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
587 &adapter->pdev->dev, 587 &adapter->pdev->dev,
588 rbi->skb->data, rbi->len, 588 rbi->skb->data, rbi->len,
589 PCI_DMA_FROMDEVICE); 589 PCI_DMA_FROMDEVICE);
590 if (dma_mapping_error(&adapter->pdev->dev,
591 rbi->dma_addr)) {
592 dev_kfree_skb_any(rbi->skb);
593 rq->stats.rx_buf_alloc_failure++;
594 break;
595 }
590 } else { 596 } else {
591 /* rx buffer skipped by the device */ 597 /* rx buffer skipped by the device */
592 } 598 }
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
605 &adapter->pdev->dev, 611 &adapter->pdev->dev,
606 rbi->page, 0, PAGE_SIZE, 612 rbi->page, 0, PAGE_SIZE,
607 PCI_DMA_FROMDEVICE); 613 PCI_DMA_FROMDEVICE);
614 if (dma_mapping_error(&adapter->pdev->dev,
615 rbi->dma_addr)) {
616 put_page(rbi->page);
617 rq->stats.rx_buf_alloc_failure++;
618 break;
619 }
608 } else { 620 } else {
609 /* rx buffers skipped by the device */ 621 /* rx buffers skipped by the device */
610 } 622 }
611 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; 623 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
612 } 624 }
613 625
614 BUG_ON(rbi->dma_addr == 0);
615 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); 626 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
616 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) 627 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
617 | val | rbi->len); 628 | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
655} 666}
656 667
657 668
658static void 669static int
659vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 670vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
660 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, 671 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
661 struct vmxnet3_adapter *adapter) 672 struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
715 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, 726 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
716 skb->data + buf_offset, buf_size, 727 skb->data + buf_offset, buf_size,
717 PCI_DMA_TODEVICE); 728 PCI_DMA_TODEVICE);
729 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
730 return -EFAULT;
718 731
719 tbi->len = buf_size; 732 tbi->len = buf_size;
720 733
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
755 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 768 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
756 buf_offset, buf_size, 769 buf_offset, buf_size,
757 DMA_TO_DEVICE); 770 DMA_TO_DEVICE);
771 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
772 return -EFAULT;
758 773
759 tbi->len = buf_size; 774 tbi->len = buf_size;
760 775
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
782 /* set the last buf_info for the pkt */ 797 /* set the last buf_info for the pkt */
783 tbi->skb = skb; 798 tbi->skb = skb;
784 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; 799 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
800
801 return 0;
785} 802}
786 803
787 804
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1020 } 1037 }
1021 1038
1022 /* fill tx descs related to addr & len */ 1039 /* fill tx descs related to addr & len */
1023 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 1040 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1041 goto unlock_drop_pkt;
1024 1042
1025 /* setup the EOP desc */ 1043 /* setup the EOP desc */
1026 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); 1044 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1231 struct vmxnet3_rx_buf_info *rbi; 1249 struct vmxnet3_rx_buf_info *rbi;
1232 struct sk_buff *skb, *new_skb = NULL; 1250 struct sk_buff *skb, *new_skb = NULL;
1233 struct page *new_page = NULL; 1251 struct page *new_page = NULL;
1252 dma_addr_t new_dma_addr;
1234 int num_to_alloc; 1253 int num_to_alloc;
1235 struct Vmxnet3_RxDesc *rxd; 1254 struct Vmxnet3_RxDesc *rxd;
1236 u32 idx, ring_idx; 1255 u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1287 skip_page_frags = true; 1306 skip_page_frags = true;
1288 goto rcd_done; 1307 goto rcd_done;
1289 } 1308 }
1309 new_dma_addr = dma_map_single(&adapter->pdev->dev,
1310 new_skb->data, rbi->len,
1311 PCI_DMA_FROMDEVICE);
1312 if (dma_mapping_error(&adapter->pdev->dev,
1313 new_dma_addr)) {
1314 dev_kfree_skb(new_skb);
 1315 /* Skb allocation failed, do not hand over this
 1316 * skb to the stack. Reuse it. Drop the existing pkt
1317 */
1318 rq->stats.rx_buf_alloc_failure++;
1319 ctx->skb = NULL;
1320 rq->stats.drop_total++;
1321 skip_page_frags = true;
1322 goto rcd_done;
1323 }
1290 1324
1291 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, 1325 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
1292 rbi->len, 1326 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1303 1337
1304 /* Immediate refill */ 1338 /* Immediate refill */
1305 rbi->skb = new_skb; 1339 rbi->skb = new_skb;
1306 rbi->dma_addr = dma_map_single(&adapter->pdev->dev, 1340 rbi->dma_addr = new_dma_addr;
1307 rbi->skb->data, rbi->len,
1308 PCI_DMA_FROMDEVICE);
1309 rxd->addr = cpu_to_le64(rbi->dma_addr); 1341 rxd->addr = cpu_to_le64(rbi->dma_addr);
1310 rxd->len = rbi->len; 1342 rxd->len = rbi->len;
1311 if (adapter->version == 2 && 1343 if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1348 skip_page_frags = true; 1380 skip_page_frags = true;
1349 goto rcd_done; 1381 goto rcd_done;
1350 } 1382 }
1383 new_dma_addr = dma_map_page(&adapter->pdev->dev
1384 , rbi->page,
1385 0, PAGE_SIZE,
1386 PCI_DMA_FROMDEVICE);
1387 if (dma_mapping_error(&adapter->pdev->dev,
1388 new_dma_addr)) {
1389 put_page(new_page);
1390 rq->stats.rx_buf_alloc_failure++;
1391 dev_kfree_skb(ctx->skb);
1392 ctx->skb = NULL;
1393 skip_page_frags = true;
1394 goto rcd_done;
1395 }
1351 1396
1352 dma_unmap_page(&adapter->pdev->dev, 1397 dma_unmap_page(&adapter->pdev->dev,
1353 rbi->dma_addr, rbi->len, 1398 rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1357 1402
1358 /* Immediate refill */ 1403 /* Immediate refill */
1359 rbi->page = new_page; 1404 rbi->page = new_page;
1360 rbi->dma_addr = dma_map_page(&adapter->pdev->dev 1405 rbi->dma_addr = new_dma_addr;
1361 , rbi->page,
1362 0, PAGE_SIZE,
1363 PCI_DMA_FROMDEVICE);
1364 rxd->addr = cpu_to_le64(rbi->dma_addr); 1406 rxd->addr = cpu_to_le64(rbi->dma_addr);
1365 rxd->len = rbi->len; 1407 rxd->len = rbi->len;
1366 } 1408 }
@@ -2157,16 +2199,18 @@ vmxnet3_set_mc(struct net_device *netdev)
2157 if (!netdev_mc_empty(netdev)) { 2199 if (!netdev_mc_empty(netdev)) {
2158 new_table = vmxnet3_copy_mc(netdev); 2200 new_table = vmxnet3_copy_mc(netdev);
2159 if (new_table) { 2201 if (new_table) {
2160 rxConf->mfTableLen = cpu_to_le16( 2202 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2161 netdev_mc_count(netdev) * ETH_ALEN); 2203
2204 rxConf->mfTableLen = cpu_to_le16(sz);
2162 new_table_pa = dma_map_single( 2205 new_table_pa = dma_map_single(
2163 &adapter->pdev->dev, 2206 &adapter->pdev->dev,
2164 new_table, 2207 new_table,
2165 rxConf->mfTableLen, 2208 sz,
2166 PCI_DMA_TODEVICE); 2209 PCI_DMA_TODEVICE);
2167 } 2210 }
2168 2211
2169 if (new_table_pa) { 2212 if (!dma_mapping_error(&adapter->pdev->dev,
2213 new_table_pa)) {
2170 new_mode |= VMXNET3_RXM_MCAST; 2214 new_mode |= VMXNET3_RXM_MCAST;
2171 rxConf->mfTablePA = cpu_to_le64(new_table_pa); 2215 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2172 } else { 2216 } else {
@@ -3074,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3074 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 3118 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3075 sizeof(struct vmxnet3_adapter), 3119 sizeof(struct vmxnet3_adapter),
3076 PCI_DMA_TODEVICE); 3120 PCI_DMA_TODEVICE);
3121 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3122 dev_err(&pdev->dev, "Failed to map dma\n");
3123 err = -EFAULT;
3124 goto err_dma_map;
3125 }
3077 adapter->shared = dma_alloc_coherent( 3126 adapter->shared = dma_alloc_coherent(
3078 &adapter->pdev->dev, 3127 &adapter->pdev->dev,
3079 sizeof(struct Vmxnet3_DriverShared), 3128 sizeof(struct Vmxnet3_DriverShared),
@@ -3232,6 +3281,7 @@ err_alloc_queue_desc:
3232err_alloc_shared: 3281err_alloc_shared:
3233 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3282 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3234 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 3283 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3284err_dma_map:
3235 free_netdev(netdev); 3285 free_netdev(netdev);
3236 return err; 3286 return err;
3237} 3287}
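
The vmxnet3 series applies one rule throughout the driver: no address returned by dma_map_single()/dma_map_page() is written to a descriptor, or allowed to replace a live mapping, until dma_mapping_error() has cleared it; this supersedes the old BUG_ON(rbi->dma_addr == 0), and refill buffers are now mapped before the previous mapping is torn down. A minimal sketch of the check-before-use pattern:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int map_rx_buf_sketch(struct device *dev, struct sk_buff *skb,
                             unsigned int len, dma_addr_t *addr)
{
        *addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *addr)) {
                dev_kfree_skb_any(skb); /* back out before touching rings */
                return -EFAULT;
        }
        return 0;       /* only now may *addr reach a descriptor */
}
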
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3f859a55c035..4c58c83dc225 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k"
73 73
74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040300 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040400
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 92fa3e1ea65c..4f9748457f5a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
907 struct nlattr *tb[], struct nlattr *data[]) 907 struct nlattr *tb[], struct nlattr *data[])
908{ 908{
909 struct net_vrf *vrf = netdev_priv(dev); 909 struct net_vrf *vrf = netdev_priv(dev);
910 int err;
911 910
912 if (!data || !data[IFLA_VRF_TABLE]) 911 if (!data || !data[IFLA_VRF_TABLE])
913 return -EINVAL; 912 return -EINVAL;
@@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
916 915
917 dev->priv_flags |= IFF_L3MDEV_MASTER; 916 dev->priv_flags |= IFF_L3MDEV_MASTER;
918 917
919 err = register_netdevice(dev); 918 return register_netdevice(dev);
920 if (err < 0)
921 goto out_fail;
922
923 return 0;
924
925out_fail:
926 free_netdev(dev);
927 return err;
928} 919}
929 920
930static size_t vrf_nl_getsize(const struct net_device *dev) 921static size_t vrf_nl_getsize(const struct net_device *dev)
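
vrf_newlink() must not call free_netdev() when register_netdevice() fails: the core disposes of the half-registered device itself on that path, so the manual free was a double free. The handler reduces to a straight return; a sketch under that assumption:

#include <linux/netdevice.h>

static int newlink_sketch(struct net_device *dev)
{
        dev->priv_flags |= IFF_L3MDEV_MASTER;

        /* on failure the core tears dev down; do not free it here */
        return register_netdevice(dev);
}
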
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index e92aaf615901..89541cc90e87 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1075 1075
1076 used = pvc_is_used(pvc); 1076 used = pvc_is_used(pvc);
1077 1077
1078 if (type == ARPHRD_ETHER) { 1078 if (type == ARPHRD_ETHER)
1079 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, 1079 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
1080 ether_setup); 1080 ether_setup);
1081 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1081 else
1082 } else
1083 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); 1082 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
1084 1083
1085 if (!dev) { 1084 if (!dev) {
@@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1088 return -ENOBUFS; 1087 return -ENOBUFS;
1089 } 1088 }
1090 1089
1091 if (type == ARPHRD_ETHER) 1090 if (type == ARPHRD_ETHER) {
1091 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1092 eth_hw_addr_random(dev); 1092 eth_hw_addr_random(dev);
1093 else { 1093 } else {
1094 *(__be16*)dev->dev_addr = htons(dlci); 1094 *(__be16*)dev->dev_addr = htons(dlci);
1095 dlci_to_q922(dev->broadcast, dlci); 1095 dlci_to_q922(dev->broadcast, dlci);
1096 } 1096 }
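
fr_add_pvc() cleared IFF_TX_SKB_SHARING on dev before the NULL check on alloc_netdev(), a NULL dereference waiting for an allocation failure; the flag tweak moves after the check, next to the MAC address setup. The reordering, sketched for the Ethernet-typed PVC:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct net_device *alloc_eth_pvc_sketch(void)
{
        struct net_device *dev;

        dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, ether_setup);
        if (!dev)
                return NULL;    /* nothing dereferenced before this check */

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        eth_hw_addr_random(dev);
        return dev;
}
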
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5c47b011a9d7..cd39025d2abf 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
549 549
550static int x25_asy_open_tty(struct tty_struct *tty) 550static int x25_asy_open_tty(struct tty_struct *tty)
551{ 551{
552 struct x25_asy *sl = tty->disc_data; 552 struct x25_asy *sl;
553 int err; 553 int err;
554 554
555 if (tty->ops->write == NULL) 555 if (tty->ops->write == NULL)
556 return -EOPNOTSUPP; 556 return -EOPNOTSUPP;
557 557
558 /* First make sure we're not already connected. */
559 if (sl && sl->magic == X25_ASY_MAGIC)
560 return -EEXIST;
561
562 /* OK. Find a free X.25 channel to use. */ 558 /* OK. Find a free X.25 channel to use. */
563 sl = x25_asy_alloc(); 559 sl = x25_asy_alloc();
564 if (sl == NULL) 560 if (sl == NULL)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index aa9bd92ac4ed..0947cc271e69 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
51static const struct ath10k_hw_params ath10k_hw_params_list[] = { 51static const struct ath10k_hw_params ath10k_hw_params_list[] = {
52 { 52 {
53 .id = QCA988X_HW_2_0_VERSION, 53 .id = QCA988X_HW_2_0_VERSION,
54 .dev_id = QCA988X_2_0_DEVICE_ID,
54 .name = "qca988x hw2.0", 55 .name = "qca988x hw2.0",
55 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, 56 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
56 .uart_pin = 7, 57 .uart_pin = 7,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
69 }, 70 },
70 { 71 {
71 .id = QCA6174_HW_2_1_VERSION, 72 .id = QCA6174_HW_2_1_VERSION,
73 .dev_id = QCA6164_2_1_DEVICE_ID,
74 .name = "qca6164 hw2.1",
75 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
76 .uart_pin = 6,
77 .otp_exe_param = 0,
78 .channel_counters_freq_hz = 88000,
79 .max_probe_resp_desc_thres = 0,
80 .fw = {
81 .dir = QCA6174_HW_2_1_FW_DIR,
82 .fw = QCA6174_HW_2_1_FW_FILE,
83 .otp = QCA6174_HW_2_1_OTP_FILE,
84 .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
85 .board_size = QCA6174_BOARD_DATA_SZ,
86 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
87 },
88 },
89 {
90 .id = QCA6174_HW_2_1_VERSION,
91 .dev_id = QCA6174_2_1_DEVICE_ID,
72 .name = "qca6174 hw2.1", 92 .name = "qca6174 hw2.1",
73 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, 93 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
74 .uart_pin = 6, 94 .uart_pin = 6,
@@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
86 }, 106 },
87 { 107 {
88 .id = QCA6174_HW_3_0_VERSION, 108 .id = QCA6174_HW_3_0_VERSION,
109 .dev_id = QCA6174_2_1_DEVICE_ID,
89 .name = "qca6174 hw3.0", 110 .name = "qca6174 hw3.0",
90 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, 111 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
91 .uart_pin = 6, 112 .uart_pin = 6,
@@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
103 }, 124 },
104 { 125 {
105 .id = QCA6174_HW_3_2_VERSION, 126 .id = QCA6174_HW_3_2_VERSION,
127 .dev_id = QCA6174_2_1_DEVICE_ID,
106 .name = "qca6174 hw3.2", 128 .name = "qca6174 hw3.2",
107 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, 129 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
108 .uart_pin = 6, 130 .uart_pin = 6,
@@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
121 }, 143 },
122 { 144 {
123 .id = QCA99X0_HW_2_0_DEV_VERSION, 145 .id = QCA99X0_HW_2_0_DEV_VERSION,
146 .dev_id = QCA99X0_2_0_DEVICE_ID,
124 .name = "qca99x0 hw2.0", 147 .name = "qca99x0 hw2.0",
125 .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, 148 .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
126 .uart_pin = 7, 149 .uart_pin = 7,
@@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
139 }, 162 },
140 { 163 {
141 .id = QCA9377_HW_1_0_DEV_VERSION, 164 .id = QCA9377_HW_1_0_DEV_VERSION,
165 .dev_id = QCA9377_1_0_DEVICE_ID,
142 .name = "qca9377 hw1.0", 166 .name = "qca9377 hw1.0",
143 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, 167 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
144 .uart_pin = 7, 168 .uart_pin = 6,
145 .otp_exe_param = 0, 169 .otp_exe_param = 0,
170 .channel_counters_freq_hz = 88000,
171 .max_probe_resp_desc_thres = 0,
172 .fw = {
173 .dir = QCA9377_HW_1_0_FW_DIR,
174 .fw = QCA9377_HW_1_0_FW_FILE,
175 .otp = QCA9377_HW_1_0_OTP_FILE,
176 .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
177 .board_size = QCA9377_BOARD_DATA_SZ,
178 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
179 },
180 },
181 {
182 .id = QCA9377_HW_1_1_DEV_VERSION,
183 .dev_id = QCA9377_1_0_DEVICE_ID,
184 .name = "qca9377 hw1.1",
185 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
186 .uart_pin = 6,
187 .otp_exe_param = 0,
188 .channel_counters_freq_hz = 88000,
189 .max_probe_resp_desc_thres = 0,
146 .fw = { 190 .fw = {
147 .dir = QCA9377_HW_1_0_FW_DIR, 191 .dir = QCA9377_HW_1_0_FW_DIR,
148 .fw = QCA9377_HW_1_0_FW_FILE, 192 .fw = QCA9377_HW_1_0_FW_FILE,
@@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
1263 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { 1307 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
1264 hw_params = &ath10k_hw_params_list[i]; 1308 hw_params = &ath10k_hw_params_list[i];
1265 1309
1266 if (hw_params->id == ar->target_version) 1310 if (hw_params->id == ar->target_version &&
1311 hw_params->dev_id == ar->dev_id)
1267 break; 1312 break;
1268 } 1313 }
1269 1314
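
Since QCA9377 reuses a QCA6174 BMI target-version value, the version word alone no longer identifies the chip; each ath10k_hw_params entry gains the PCI device id and the lookup above matches on the pair. A self-contained sketch of the two-key scan:

#include <linux/types.h>

struct hw_params_key {
        u32 id;         /* BMI target version */
        u16 dev_id;     /* PCI device id */
};

static int find_hw_params_sketch(const struct hw_params_key *tbl, int n,
                                 u32 target_version, u16 dev_id)
{
        int i;

        for (i = 0; i < n; i++)
                if (tbl[i].id == target_version && tbl[i].dev_id == dev_id)
                        return i;       /* index of the matching entry */
        return -1;      /* no table entry for this combination */
}
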
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 018c64f4fd25..858d75f49a9f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -636,6 +636,7 @@ struct ath10k {
636 636
637 struct ath10k_hw_params { 637 struct ath10k_hw_params {
638 u32 id; 638 u32 id;
639 u16 dev_id;
639 const char *name; 640 const char *name;
640 u32 patch_load_addr; 641 u32 patch_load_addr;
641 int uart_pin; 642 int uart_pin;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 39966a05c1cc..713c2bcea178 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -22,6 +22,12 @@
22 22
23#define ATH10K_FW_DIR "ath10k" 23#define ATH10K_FW_DIR "ath10k"
24 24
25#define QCA988X_2_0_DEVICE_ID (0x003c)
26#define QCA6164_2_1_DEVICE_ID (0x0041)
27#define QCA6174_2_1_DEVICE_ID (0x003e)
28#define QCA99X0_2_0_DEVICE_ID (0x0040)
29#define QCA9377_1_0_DEVICE_ID (0x0042)
30
25/* QCA988X 1.0 definitions (unsupported) */ 31/* QCA988X 1.0 definitions (unsupported) */
26#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 32#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
27 33
@@ -42,6 +48,10 @@
42#define QCA6174_HW_3_0_VERSION 0x05020000 48#define QCA6174_HW_3_0_VERSION 0x05020000
43#define QCA6174_HW_3_2_VERSION 0x05030000 49#define QCA6174_HW_3_2_VERSION 0x05030000
44 50
51/* QCA9377 target BMI version signatures */
52#define QCA9377_HW_1_0_DEV_VERSION 0x05020000
53#define QCA9377_HW_1_1_DEV_VERSION 0x05020001
54
45enum qca6174_pci_rev { 55enum qca6174_pci_rev {
46 QCA6174_PCI_REV_1_1 = 0x11, 56 QCA6174_PCI_REV_1_1 = 0x11,
47 QCA6174_PCI_REV_1_3 = 0x13, 57 QCA6174_PCI_REV_1_3 = 0x13,
@@ -60,6 +70,11 @@ enum qca6174_chip_id_rev {
60 QCA6174_HW_3_2_CHIP_ID_REV = 10, 70 QCA6174_HW_3_2_CHIP_ID_REV = 10,
61}; 71};
62 72
73enum qca9377_chip_id_rev {
74 QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
75 QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
76};
77
63#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" 78#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
64#define QCA6174_HW_2_1_FW_FILE "firmware.bin" 79#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
65#define QCA6174_HW_2_1_OTP_FILE "otp.bin" 80#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
@@ -85,8 +100,6 @@ enum qca6174_chip_id_rev {
85#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 100#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
86 101
87/* QCA9377 1.0 definitions */ 102/* QCA9377 1.0 definitions */
88#define QCA9377_HW_1_0_DEV_VERSION 0x05020001
89#define QCA9377_HW_1_0_CHIP_ID_REV 0x1
90#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" 103#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
91#define QCA9377_HW_1_0_FW_FILE "firmware.bin" 104#define QCA9377_HW_1_0_FW_FILE "firmware.bin"
92#define QCA9377_HW_1_0_OTP_FILE "otp.bin" 105#define QCA9377_HW_1_0_OTP_FILE "otp.bin"
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a7411fe90cc4..95a55405ebf0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4225 4225
4226static u32 get_nss_from_chainmask(u16 chain_mask) 4226static u32 get_nss_from_chainmask(u16 chain_mask)
4227{ 4227{
4228 if ((chain_mask & 0x15) == 0x15) 4228 if ((chain_mask & 0xf) == 0xf)
4229 return 4; 4229 return 4;
4230 else if ((chain_mask & 0x7) == 0x7) 4230 else if ((chain_mask & 0x7) == 0x7)
4231 return 3; 4231 return 3;
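
get_nss_from_chainmask() maps a chain bitmask to a spatial-stream count by testing for contiguous low bits, and 0x15 is 0b10101: no 4-chain mask (0b1111) ever matches it, so 4-stream hardware was silently capped at 3. The corrected ladder, sketched with the presumed remaining branches:

#include <linux/types.h>

static u32 nss_from_chainmask_sketch(u16 chain_mask)
{
        if ((chain_mask & 0xf) == 0xf)  /* 0b1111: four chains */
                return 4;
        if ((chain_mask & 0x7) == 0x7)  /* 0b0111: three chains */
                return 3;
        if ((chain_mask & 0x3) == 0x3)  /* 0b0011: two chains */
                return 2;
        return 1;
}
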
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3fca200b986c..930785a724e1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
57#define ATH10K_PCI_TARGET_WAIT 3000 57#define ATH10K_PCI_TARGET_WAIT 3000
58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
59 59
60#define QCA988X_2_0_DEVICE_ID (0x003c)
61#define QCA6164_2_1_DEVICE_ID (0x0041)
62#define QCA6174_2_1_DEVICE_ID (0x003e)
63#define QCA99X0_2_0_DEVICE_ID (0x0040)
64#define QCA9377_1_0_DEVICE_ID (0x0042)
65
66static const struct pci_device_id ath10k_pci_id_table[] = { 60static const struct pci_device_id ath10k_pci_id_table[] = {
67 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ 61 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
68 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ 62 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
92 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
93 87
94 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 88 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
89
95 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 90 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
91 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
96}; 92};
97 93
98static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 94static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
111static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 107static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
112static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 108static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
113static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 109static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
110static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
114 111
115static const struct ce_attr host_ce_config_wlan[] = { 112static struct ce_attr host_ce_config_wlan[] = {
116 /* CE0: host->target HTC control and raw streams */ 113 /* CE0: host->target HTC control and raw streams */
117 { 114 {
118 .flags = CE_ATTR_FLAGS, 115 .flags = CE_ATTR_FLAGS,
@@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
128 .src_nentries = 0, 125 .src_nentries = 0,
129 .src_sz_max = 2048, 126 .src_sz_max = 2048,
130 .dest_nentries = 512, 127 .dest_nentries = 512,
131 .recv_cb = ath10k_pci_htc_rx_cb, 128 .recv_cb = ath10k_pci_htt_htc_rx_cb,
132 }, 129 },
133 130
134 /* CE2: target->host WMI */ 131 /* CE2: target->host WMI */
@@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
217}; 214};
218 215
219/* Target firmware's Copy Engine configuration. */ 216/* Target firmware's Copy Engine configuration. */
220static const struct ce_pipe_config target_ce_config_wlan[] = { 217static struct ce_pipe_config target_ce_config_wlan[] = {
221 /* CE0: host->target HTC control and raw streams */ 218 /* CE0: host->target HTC control and raw streams */
222 { 219 {
223 .pipenum = __cpu_to_le32(0), 220 .pipenum = __cpu_to_le32(0),
@@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
330 * This table is derived from the CE_PCI TABLE, above. 327 * This table is derived from the CE_PCI TABLE, above.
331 * It is passed to the Target at startup for use by firmware. 328 * It is passed to the Target at startup for use by firmware.
332 */ 329 */
333static const struct service_to_pipe target_service_to_ce_map_wlan[] = { 330static struct service_to_pipe target_service_to_ce_map_wlan[] = {
334 { 331 {
335 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 332 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
336 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 333 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1208 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1205 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1209} 1206}
1210 1207
1208static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1209{
 1210 /* CE4 polling needs to be done whenever the CE pipe that transports
1211 * HTT Rx (target->host) is processed.
1212 */
1213 ath10k_ce_per_engine_service(ce_state->ar, 4);
1214
1215 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1216}
1217
1211/* Called by lower (CE) layer when a send to HTT Target completes. */ 1218/* Called by lower (CE) layer when a send to HTT Target completes. */
1212static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1219static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1213{ 1220{
@@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar)
2027 return 0; 2034 return 0;
2028} 2035}
2029 2036
2037static void ath10k_pci_override_ce_config(struct ath10k *ar)
2038{
2039 struct ce_attr *attr;
2040 struct ce_pipe_config *config;
2041
2042 /* For QCA6174 we're overriding the Copy Engine 5 configuration,
 2043 * since it is currently used for another feature.
2044 */
2045
2046 /* Override Host's Copy Engine 5 configuration */
2047 attr = &host_ce_config_wlan[5];
2048 attr->src_sz_max = 0;
2049 attr->dest_nentries = 0;
2050
2051 /* Override Target firmware's Copy Engine configuration */
2052 config = &target_ce_config_wlan[5];
2053 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2054 config->nbytes_max = __cpu_to_le32(2048);
2055
2056 /* Map from service/endpoint to Copy Engine */
2057 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2058}
2059
2030static int ath10k_pci_alloc_pipes(struct ath10k *ar) 2060static int ath10k_pci_alloc_pipes(struct ath10k *ar)
2031{ 2061{
2032 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2062 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
3020 goto err_core_destroy; 3050 goto err_core_destroy;
3021 } 3051 }
3022 3052
3053 if (QCA_REV_6174(ar))
3054 ath10k_pci_override_ce_config(ar);
3055
3023 ret = ath10k_pci_alloc_pipes(ar); 3056 ret = ath10k_pci_alloc_pipes(ar);
3024 if (ret) { 3057 if (ret) {
3025 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3058 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 1a73c7a1da77..bf88ec3a65fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,7 +69,7 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL7260_UCODE_API_MAX 17 72#define IWL7260_UCODE_API_MAX 19
73 73
74/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
75#define IWL7260_UCODE_API_OK 13 75#define IWL7260_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 0116e5a4c393..9bcc0bf937d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 17 72#define IWL8000_UCODE_API_MAX 19
73 73
74/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 75#define IWL8000_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 85ae902df7c0..29ae58ebf223 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
309 * to transmit packets to the AP, i.e. the PTK. 309 * to transmit packets to the AP, i.e. the PTK.
310 */ 310 */
311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
312 key->hw_key_idx = 0;
313 mvm->ptk_ivlen = key->iv_len; 312 mvm->ptk_ivlen = key->iv_len;
314 mvm->ptk_icvlen = key->icv_len; 313 mvm->ptk_icvlen = key->icv_len;
314 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
315 } else { 315 } else {
316 /* 316 /*
317 * firmware only supports TSC/RSC for a single key, 317 * firmware only supports TSC/RSC for a single key,
@@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
319 * with new ones -- this relies on mac80211 doing 319 * with new ones -- this relies on mac80211 doing
320 * list_add_tail(). 320 * list_add_tail().
321 */ 321 */
322 key->hw_key_idx = 1;
323 mvm->gtk_ivlen = key->iv_len; 322 mvm->gtk_ivlen = key->iv_len;
324 mvm->gtk_icvlen = key->icv_len; 323 mvm->gtk_icvlen = key->icv_len;
324 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
325 } 325 }
326 326
327 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
328 data->error = ret != 0; 327 data->error = ret != 0;
329out_unlock: 328out_unlock:
330 mutex_unlock(&mvm->mutex); 329 mutex_unlock(&mvm->mutex);
@@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
772 */ 771 */
773 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 772 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
774 773
775 /* We reprogram keys and shouldn't allocate new key indices */
776 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
777
778 mvm->ptk_ivlen = 0; 774 mvm->ptk_ivlen = 0;
779 mvm->ptk_icvlen = 0; 775 mvm->ptk_icvlen = 0;
780 mvm->ptk_ivlen = 0; 776 mvm->ptk_ivlen = 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1fb684693040..e88afac51c5d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2941{ 2941{
2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2943 int ret; 2943 int ret;
2944 u8 key_offset;
2944 2945
2945 if (iwlwifi_mod_params.sw_crypto) { 2946 if (iwlwifi_mod_params.sw_crypto) {
2946 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 2947 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
@@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		break;
 	}
 
+	/* in HW restart reuse the index, otherwise request a new one */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		key_offset = key->hw_key_idx;
+	else
+		key_offset = STA_KEY_IDX_INVALID;
+
 	IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
-	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
-				  test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
-					   &mvm->status));
+	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
 	if (ret) {
 		IWL_WARN(mvm, "set key failed\n");
 		/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 300a249486e4..354acbde088e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 	return max_offs;
 }
 
-static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
+static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
 	 * station ID, then use AP's station ID.
 	 */
 	if (vif->type == NL80211_IFTYPE_STATION &&
-	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
-		return mvmvif->ap_sta_id;
+	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+		u8 sta_id = mvmvif->ap_sta_id;
+
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		/*
+		 * It is possible that the 'sta' parameter is NULL,
+		 * for example when a GTK is removed - the sta_id will then
+		 * be the AP ID, and no station was passed by mac80211.
+		 */
+		if (IS_ERR_OR_NULL(sta))
+			return IWL_MVM_STATION_COUNT;
+
+		return sta_id;
+	}
 
 	return IWL_MVM_STATION_COUNT;
 }
@@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 				struct iwl_mvm_sta *mvm_sta,
 				struct ieee80211_key_conf *keyconf, bool mcast,
-				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
+				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+				u8 key_offset)
 {
 	struct iwl_mvm_add_sta_key_cmd cmd = {};
 	__le16 key_flags;
@@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 	if (mcast)
 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-	cmd.key_offset = keyconf->hw_key_idx;
+	cmd.key_offset = key_offset;
 	cmd.key_flags = key_flags;
 	cmd.sta_id = sta_id;
 
@@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 				struct ieee80211_vif *vif,
 				struct ieee80211_sta *sta,
 				struct ieee80211_key_conf *keyconf,
+				u8 key_offset,
 				bool mcast)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
 		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-					   seq.tkip.iv32, p1k, 0);
+					   seq.tkip.iv32, p1k, 0, key_offset);
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
 		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-					   0, NULL, 0);
+					   0, NULL, 0, key_offset);
 		break;
 	default:
 		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-					   0, NULL, 0);
+					   0, NULL, 0, key_offset);
 	}
 
 	return ret;
@@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 			struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta,
 			struct ieee80211_key_conf *keyconf,
-			bool have_key_offset)
+			u8 key_offset)
 {
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 	u8 sta_id;
@@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Get the station id from the mvm local station table */
-	sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+	sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
 	if (sta_id == IWL_MVM_STATION_COUNT) {
 		IWL_ERR(mvm, "Failed to find station id\n");
 		return -EINVAL;
@@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
 		return -EINVAL;
 
-	if (!have_key_offset) {
-		/*
-		 * The D3 firmware hardcodes the PTK offset to 0, so we have to
-		 * configure it there. As a result, this workaround exists to
-		 * let the caller set the key offset (hw_key_idx), see d3.c.
-		 */
-		keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
-		if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+	/* If the key_offset is not pre-assigned, we need to find a
+	 * new offset to use. In normal cases, the offset is not
+	 * pre-assigned, but during HW_RESTART we want to reuse the
+	 * same indices, so we pass them when this function is called.
+	 *
+	 * In D3 entry, we need to hardcode the indices (because the
+	 * firmware hardcodes the PTK offset to 0). In this case, we
+	 * need to make sure we don't overwrite the hw_key_idx in the
+	 * keyconf structure, because otherwise we cannot configure
+	 * the original ones back when resuming.
+	 */
+	if (key_offset == STA_KEY_IDX_INVALID) {
+		key_offset = iwl_mvm_set_fw_key_idx(mvm);
+		if (key_offset == STA_KEY_IDX_INVALID)
 			return -ENOSPC;
+		keyconf->hw_key_idx = key_offset;
 	}
 
-	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
+	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
 	if (ret) {
 		__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
 		goto end;
@@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	 */
 	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
 	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
-		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
+		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
+					    key_offset, !mcast);
 		if (ret) {
 			__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
 			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
@@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Get the station id from the mvm local station table */
-	sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+	sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
 
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
@@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 		return 0;
 	}
 
-	/*
-	 * It is possible that the 'sta' parameter is NULL, and thus
-	 * there is a need to retrieve the sta from the local station table,
-	 * for example when a GTK is removed (where the sta_id will then be
-	 * the AP ID, and no station was passed by mac80211.)
-	 */
-	if (!sta) {
-		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-						lockdep_is_held(&mvm->mutex));
-		if (!sta) {
-			IWL_ERR(mvm, "Invalid station id\n");
-			return -EINVAL;
-		}
-	}
-
-	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
-		return -EINVAL;
-
 	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
 	if (ret)
 		return ret;
@@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
 			      u16 *phase1key)
 {
 	struct iwl_mvm_sta *mvm_sta;
-	u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+	u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 
 	if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
@@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
 
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-			     iv32, phase1key, CMD_ASYNC);
+			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
 	rcu_read_unlock();
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index eedb215eba3f..0631cc0a6d3c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 			struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta,
-			struct ieee80211_key_conf *key,
-			bool have_key_offset);
+			struct ieee80211_key_conf *keyconf,
+			u8 key_offset);
 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 			   struct ieee80211_vif *vif,
 			   struct ieee80211_sta *sta,
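Note: the interface change above replaces the old have_key_offset boolean with an explicit firmware key slot. A condensed sketch of how a caller picks the slot (this mirrors the mac80211.c hunk earlier in this series; it is illustrative, not additional patch content):

	u8 key_offset;

	/* reuse the previous slot across a firmware restart,
	 * otherwise let the driver allocate a free one
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		key_offset = key->hw_key_idx;
	else
		key_offset = STA_KEY_IDX_INVALID;

	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);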
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 644b58bc5226..639761fb2bfb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 8000 Series */
 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
 	{0}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 6e9418ed90c2..bbb789f8990b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	if (!rtlpci->int_clear)
+	if (rtlpci->int_clear)
 		rtl8821ae_clear_interrupt(hw);/*clear it here first*/
 
 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 8ee141a55bc5..142bdff4ed60 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
-MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n");
+MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
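Note: the two rtl8821ae hunks above are one logical fix: int_clear now means "clear interrupts before setting the mask" and defaults to on, so the test, the default value, and the parameter description all have to agree. A condensed, hypothetical view of the plumbing (the real driver routes the value through the rtl_pci structure; names are abbreviated here):

	static bool int_clear = true;	/* default 1: clear before set */
	module_param(int_clear, bool, 0444);
	MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");

	/* in the enable-interrupt path */
	if (rtlpci->int_clear)
		rtl8821ae_clear_interrupt(hw);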
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 219dc206fa5f..a5fe23952586 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,5 @@
 
 obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
 
-nvme-y	+= pci.o scsi.o lightnvm.o
+lightnvm-$(CONFIG_NVM)	:= lightnvm.o
+nvme-y	+= pci.o scsi.o $(lightnvm-y)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index e0b7b95813bc..06c336410235 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -22,8 +22,6 @@
 
 #include "nvme.h"
 
-#ifdef CONFIG_NVM
-
 #include <linux/nvme.h>
 #include <linux/bitops.h>
 #include <linux/lightnvm.h>
@@ -93,7 +91,7 @@ struct nvme_nvm_l2ptbl {
 	__le16 cdw14[6];
 };
 
-struct nvme_nvm_bbtbl {
+struct nvme_nvm_getbbtbl {
 	__u8 opcode;
 	__u8 flags;
 	__u16 command_id;
@@ -101,10 +99,23 @@ struct nvme_nvm_bbtbl {
 	__u64 rsvd[2];
 	__le64 prp1;
 	__le64 prp2;
-	__le32 prp1_len;
-	__le32 prp2_len;
-	__le32 lbb;
-	__u32 rsvd11[3];
+	__le64 spba;
+	__u32 rsvd4[4];
+};
+
+struct nvme_nvm_setbbtbl {
+	__u8 opcode;
+	__u8 flags;
+	__u16 command_id;
+	__le32 nsid;
+	__le64 rsvd[2];
+	__le64 prp1;
+	__le64 prp2;
+	__le64 spba;
+	__le16 nlb;
+	__u8 value;
+	__u8 rsvd3;
+	__u32 rsvd4[3];
 };
 
 struct nvme_nvm_erase_blk {
@@ -129,8 +140,8 @@ struct nvme_nvm_command {
 		struct nvme_nvm_hb_rw hb_rw;
 		struct nvme_nvm_ph_rw ph_rw;
 		struct nvme_nvm_l2ptbl l2p;
-		struct nvme_nvm_bbtbl get_bb;
-		struct nvme_nvm_bbtbl set_bb;
+		struct nvme_nvm_getbbtbl get_bb;
+		struct nvme_nvm_setbbtbl set_bb;
 		struct nvme_nvm_erase_blk erase;
 	};
 };
@@ -142,11 +153,13 @@ struct nvme_nvm_id_group {
 	__u8 num_ch;
 	__u8 num_lun;
 	__u8 num_pln;
+	__u8 rsvd1;
 	__le16 num_blk;
 	__le16 num_pg;
 	__le16 fpg_sz;
 	__le16 csecs;
 	__le16 sos;
+	__le16 rsvd2;
 	__le32 trdt;
 	__le32 trdm;
 	__le32 tprt;
@@ -154,8 +167,9 @@ struct nvme_nvm_id_group {
 	__le32 tbet;
 	__le32 tbem;
 	__le32 mpos;
+	__le32 mccap;
 	__le16 cpar;
-	__u8 reserved[913];
+	__u8 reserved[906];
 } __packed;
 
 struct nvme_nvm_addr_format {
@@ -178,15 +192,28 @@ struct nvme_nvm_id {
 	__u8 ver_id;
 	__u8 vmnt;
 	__u8 cgrps;
-	__u8 res[5];
+	__u8 res;
 	__le32 cap;
 	__le32 dom;
 	struct nvme_nvm_addr_format ppaf;
-	__u8 ppat;
-	__u8 resv[223];
+	__u8 resv[228];
 	struct nvme_nvm_id_group groups[4];
 } __packed;
 
+struct nvme_nvm_bb_tbl {
+	__u8 tblid[4];
+	__le16 verid;
+	__le16 revid;
+	__le32 rvsd1;
+	__le32 tblks;
+	__le32 tfact;
+	__le32 tgrown;
+	__le32 tdresv;
+	__le32 thresv;
+	__le32 rsvd2[8];
+	__u8 blk[0];
+};
+
 /*
  * Check we didn't inadvertently grow the command struct
  */
@@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
-	BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
 }
 
 static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
@@ -234,6 +263,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 		dst->tbet = le32_to_cpu(src->tbet);
 		dst->tbem = le32_to_cpu(src->tbem);
 		dst->mpos = le32_to_cpu(src->mpos);
+		dst->mccap = le32_to_cpu(src->mccap);
 
 		dst->cpar = le16_to_cpu(src->cpar);
 	}
@@ -244,6 +274,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 {
 	struct nvme_ns *ns = q->queuedata;
+	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_id *nvme_nvm_id;
 	struct nvme_nvm_command c = {};
 	int ret;
@@ -256,8 +287,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	if (!nvme_nvm_id)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
-				sizeof(struct nvme_nvm_id));
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+				nvme_nvm_id, sizeof(struct nvme_nvm_id));
 	if (ret) {
 		ret = -EIO;
 		goto out;
@@ -268,6 +299,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	nvm_id->cgrps = nvme_nvm_id->cgrps;
 	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
 	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
+	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
+					sizeof(struct nvme_nvm_addr_format));
 
 	ret = init_grps(nvm_id, nvme_nvm_id);
 out:
@@ -281,7 +314,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	u32 len = queue_max_hw_sectors(q) << 9;
+	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
 	u32 nlb_pr_rq = len / sizeof(u64);
 	u64 cmd_slba = slba;
 	void *entries;
@@ -299,8 +332,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 		c.l2p.slba = cpu_to_le64(cmd_slba);
 		c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-		ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
-				entries, len);
+		ret = nvme_submit_sync_cmd(dev->admin_q,
+				(struct nvme_command *)&c, entries, len);
 		if (ret) {
 			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
 					ret);
@@ -322,43 +355,84 @@ out:
 	return ret;
 }
 
-static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid,
-				unsigned int nr_blocks,
-				nvm_bb_update_fn *update_bbtbl, void *priv)
+static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
+				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+				void *priv)
 {
+	struct request_queue *q = nvmdev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	void *bb_bitmap;
-	u16 bb_bitmap_size;
+	struct nvme_nvm_bb_tbl *bb_tbl;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
 	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
-	c.get_bb.lbb = cpu_to_le32(lunid);
-	bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
-	bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
-	if (!bb_bitmap)
-		return -ENOMEM;
+	c.get_bb.spba = cpu_to_le64(ppa.ppa);
 
-	bitmap_zero(bb_bitmap, nr_blocks);
+	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
+	if (!bb_tbl)
+		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap,
-								bb_bitmap_size);
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								bb_tbl, tblsz);
 	if (ret) {
 		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
 		goto out;
 	}
 
-	ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv);
+	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
+	    bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
+		dev_err(dev->dev, "bbt format mismatch\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (le16_to_cpu(bb_tbl->verid) != 1) {
+		ret = -EINVAL;
+		dev_err(dev->dev, "bbt version not supported\n");
+		goto out;
+	}
+
+	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+		ret = -EINVAL;
+		dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
+				le32_to_cpu(bb_tbl->tblks), nr_blocks);
+		goto out;
+	}
+
+	ppa = dev_to_generic_addr(nvmdev, ppa);
+	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
 	if (ret) {
 		ret = -EINTR;
 		goto out;
 	}
 
 out:
-	kfree(bb_bitmap);
+	kfree(bb_tbl);
+	return ret;
+}
+
+static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
+								int type)
+{
+	struct nvme_ns *ns = q->queuedata;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_nvm_command c = {};
+	int ret = 0;
+
+	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
+	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
+	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+	c.set_bb.value = type;
+
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								NULL, 0);
+	if (ret)
+		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
 	return ret;
 }
 
@@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.get_l2p_tbl = nvme_nvm_get_l2p_tbl,
 
 	.get_bb_tbl = nvme_nvm_get_bb_tbl,
+	.set_bb_tbl = nvme_nvm_set_bb_tbl,
 
 	.submit_io = nvme_nvm_submit_io,
 	.erase_block = nvme_nvm_erase_block,
@@ -496,31 +571,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
 	nvm_unregister(disk_name);
 }
 
+/* move to shared place when used in multiple places. */
+#define PCI_VENDOR_ID_CNEX 0x1d1d
+#define PCI_DEVICE_ID_CNEX_WL 0x2807
+#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
+
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	struct nvme_dev *dev = ns->dev;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
+	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
+	    pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
 	    id->vs[0] == 0x1)
 		return 1;
 
 	/* CNEX Labs - PCI ID + Vendor specific bit */
-	if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
+	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
+	    pdev->device == PCI_DEVICE_ID_CNEX_WL &&
 	    id->vs[0] == 0x1)
 		return 1;
 
 	return 0;
 }
-#else
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
-{
-	return 0;
-}
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
-int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
-{
-	return 0;
-}
-#endif /* CONFIG_NVM */
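Note: nvme_nvm_get_bb_tbl() above now returns a structured table (a 512-byte header followed by one state byte per block, via the blk[0] flexible array) instead of an opaque bitmap, which is why the buffer is sized as sizeof(struct nvme_nvm_bb_tbl) + nr_blocks. A sketch of how a hypothetical consumer could walk the per-block states handed to update_bbtbl (the non-zero-means-bad reading is an assumption for illustration):

	static int count_bad_blocks(struct nvme_nvm_bb_tbl *tbl, int nr_blocks)
	{
		int i, bad = 0;

		for (i = 0; i < nr_blocks; i++)
			if (tbl->blk[i])	/* non-zero: factory or grown bad */
				bad++;
		return bad;
	}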
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fdb4e5bad9ac..044253dca30a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
 int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
 int nvme_sg_get_version_num(int __user *ip);
 
+#ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
 int nvme_nvm_register(struct request_queue *q, char *disk_name);
 void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+#else
+static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
+{
+	return 0;
+}
+
+static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
+
+static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+	return 0;
+}
+#endif /* CONFIG_NVM */
 
 #endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8187df204695..9e294ff4e652 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 
@@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return;
 
-	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+	if (likely(nvmeq->cq_vector >= 0))
+		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 	nvmeq->cq_head = head;
 	nvmeq->cq_phase = phase;
 
@@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u32 aqa;
 	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
-	unsigned page_shift = PAGE_SHIFT;
+	/*
+	 * default to a 4K page size, with the intention to update this
+	 * path in the future to accommodate architectures with differing
+	 * kernel and IO page sizes.
+	 */
+	unsigned page_shift = 12;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
 		dev_err(dev->dev,
@@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 			1 << page_shift);
 		return -ENODEV;
 	}
-	if (page_shift > dev_page_max) {
-		dev_info(dev->dev,
-			"Device maximum page size (%u) smaller than "
-			"host (%u); enabling work-around\n",
-			1 << dev_page_max, 1 << page_shift);
-		page_shift = dev_page_max;
-	}
 
 	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
 						NVME_CAP_NSSRC(cap) : 0;
@@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 	if (dev->max_hw_sectors) {
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 		blk_queue_max_segments(ns->queue,
-			((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+			(dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
 	}
 	if (dev->stripe_size)
 		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
@@ -2701,6 +2708,18 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+	/*
+	 * Temporary fix for the Apple controller found in the MacBook8,1 and
+	 * some MacBook7,1 to avoid controller resets and data loss.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+		dev->q_depth = 2;
+		dev_warn(dev->dev, "detected Apple NVMe controller, set "
+			"queue depth=%u to work around controller resets\n",
+			dev->q_depth);
+	}
+
 	if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
 		dev->cmb = nvme_map_cmb(dev);
 
@@ -2787,6 +2806,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
 	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
 	nvme_put_dq(dq);
+
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 540f077c37ea..02a7452bdf23 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
 				ret, pp->io);
 			continue;
 		}
-		pp->io_base = pp->io->start;
 		break;
 	case IORESOURCE_MEM:
 		pp->mem = win->res;
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 35457ecd8e70..163671a4f798 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
 	.link_up = hisi_pcie_link_up,
 };
 
-static int __init hisi_add_pcie_port(struct pcie_port *pp,
+static int hisi_add_pcie_port(struct pcie_port *pp,
 				     struct platform_device *pdev)
 {
 	int ret;
@@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp,
 	return 0;
 }
 
-static int __init hisi_pcie_probe(struct platform_device *pdev)
+static int hisi_pcie_probe(struct platform_device *pdev)
 {
 	struct hisi_pcie *hisi_pcie;
 	struct pcie_port *pp;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 4446fcb5effd..d7ffd66814bb 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev)
 	pci_dev->state_saved = false;
 	pci_dev->no_d3cold = false;
 	error = pm->runtime_suspend(dev);
-	suspend_report_result(pm->runtime_suspend, error);
-	if (error)
+	if (error) {
+		/*
+		 * -EBUSY and -EAGAIN are used to request that the runtime PM
+		 * core schedule a new suspend, so log the event only with
+		 * debug log level.
+		 */
+		if (error == -EBUSY || error == -EAGAIN)
+			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
+				pm->runtime_suspend, error);
+		else
+			dev_err(dev, "can't suspend (%pf returned %d)\n",
+				pm->runtime_suspend, error);
+
 		return error;
+	}
 	if (!pci_dev->d3cold_allowed)
 		pci_dev->no_d3cold = true;
 
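Note: the runtime PM core treats -EBUSY and -EAGAIN from a runtime_suspend callback as "retry later" rather than as a hard failure, which is why the hunk above demotes those cases to dev_dbg(). A hypothetical driver callback relying on that contract (device_still_streaming() is an invented placeholder check):

	static int mydev_runtime_suspend(struct device *dev)
	{
		if (device_still_streaming(dev))
			return -EBUSY;	/* PM core will reschedule the suspend */
		return 0;
	}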
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 92618686604c..eead54cd01b2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	if (node >= MAX_NUMNODES || !node_online(node))
+	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
+		return -EINVAL;
+
+	if (node != NUMA_NO_NODE && !node_online(node))
 		return -EINVAL;
 
 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
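Note: after this change the only negative value numa_node_store() accepts is NUMA_NO_NODE (-1), and node_online() is never called with a negative index. A small user-space model of the accepted inputs (sketch; -1 stands in for NUMA_NO_NODE and max_nodes for MAX_NUMNODES):

	static int numa_store_ok(long node, long max_nodes, int online)
	{
		if ((node < 0 && node != -1) || node >= max_nodes)
			return 0;	/* rejected: -EINVAL */
		if (node != -1 && !online)
			return 0;	/* rejected: node not online */
		return 1;	/* accepted */
	}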
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fd2f03fa53f3..d390fc1475ec 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 }
 #endif
 
-struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
-
 #endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e735c728e3b3..edb1984201e9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev)
 {
 	struct device *bridge = pci_get_host_bridge_device(dev);
 
-	if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
-		if (bridge->parent)
+	if (IS_ENABLED(CONFIG_OF) &&
+	    bridge->parent && bridge->parent->of_node) {
 		of_dma_configure(&dev->dev, bridge->parent->of_node);
 	} else if (has_acpi_companion(bridge)) {
 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7e327309cf69..c2dd52ea4198 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3405,7 +3405,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
 	return 0;
 }
 
-#include "../gpu/drm/i915/i915_reg.h"
+#define SOUTH_CHICKEN2		0xc2004
+#define PCH_PP_STATUS		0xc7200
+#define PCH_PP_CONTROL		0xc7204
 #define MSG_CTL		0x45010
 #define NSDE_PWR_STATE		0xd0100
 #define IGD_OPERATION_TIMEOUT	10000     /* set timeout 10 seconds */
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b422e4ed73f4..312c78b27a32 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -5,8 +5,6 @@
 config PINCTRL
 	bool
 
-if PINCTRL
-
 menu "Pin controllers"
 	depends on PINCTRL
 
@@ -274,5 +272,3 @@ config PINCTRL_TB10X
 	select GPIOLIB
 
 endmenu
-
-endif
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 88a7fac11bd4..acaf84cadca3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
 		func->groups[i] = child->name;
 		grp = &info->groups[grp_index++];
 		ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
-		if (ret == -ENOMEM)
+		if (ret == -ENOMEM) {
+			of_node_put(child);
 			return ret;
+		}
 	}
 
 	return 0;
@@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
 
 	for_each_child_of_node(np, child) {
 		ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
-		if (ret == -ENOMEM)
+		if (ret == -ENOMEM) {
+			of_node_put(child);
 			return -ENOMEM;
+		}
 	}
 
 	return 0;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index f307f1d27d64..5c717275a7fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 	reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
 	bit = BIT(offset & 0xf);
 	regmap_read(pctl->regmap1, reg_addr, &read_val);
-	return !!(read_val & bit);
+	return !(read_val & bit);
 }
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
 	unsigned int read_val = 0;
 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
 
-	if (mtk_gpio_get_direction(chip, offset))
-		reg_addr = mtk_get_port(pctl, offset) +
-			pctl->devdata->dout_offset;
-	else
-		reg_addr = mtk_get_port(pctl, offset) +
-			pctl->devdata->din_offset;
+	reg_addr = mtk_get_port(pctl, offset) +
+		pctl->devdata->din_offset;
 
 	bit = BIT(offset & 0xf);
 	regmap_read(pctl->regmap1, reg_addr, &read_val);
@@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = {
 	.owner			= THIS_MODULE,
 	.request		= gpiochip_generic_request,
 	.free			= gpiochip_generic_free,
+	.get_direction		= mtk_gpio_get_direction,
 	.direction_input	= mtk_gpio_direction_input,
 	.direction_output	= mtk_gpio_direction_output,
 	.get			= mtk_gpio_get,
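Note: the gpiochip get_direction() convention is that 0 means output and 1 means input, and on this controller a set direction bit appears to mark the line as an output, so the raw bit must be inverted once ('!') rather than merely normalized ('!!'). With the callback now registered, mtk_gpio_get() can unconditionally read the input-data register. A sketch of the convention (the macro values mirror include/linux/gpio.h):

	/* gpiochip get_direction() return values */
	#define GPIOF_DIR_OUT	0	/* line configured as output */
	#define GPIOF_DIR_IN	1	/* line configured as input */

	/* dir bit set => output => return 0 */
	return !(read_val & bit);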
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index d809c9eaa323..19a3c3bc2f1f 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pctrl->dev = &pdev->dev;
-	pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+	pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
 
 	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
 	if (!pctrl->regmap) {
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 8982027de8e8..b868ef1766a0 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pctrl->dev = &pdev->dev;
-	pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+	pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
 
 	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
 	if (!pctrl->regmap) {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index e7deb51de7dc..9842bb106796 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -31,11 +31,11 @@
 	PORT_GP_12(5, fn, sfx)
 
 #undef _GP_DATA
-#define _GP_DATA(bank, pin, name, sfx)					\
+#define _GP_DATA(bank, pin, name, sfx, cfg)				\
 	PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT)
 
-#define _GP_INOUTSEL(bank, pin, name, sfx)	name##_IN, name##_OUT
-#define _GP_INDT(bank, pin, name, sfx)	name##_DATA
+#define _GP_INOUTSEL(bank, pin, name, sfx, cfg)	name##_IN, name##_OUT
+#define _GP_INDT(bank, pin, name, sfx, cfg)	name##_DATA
 #define GP_INOUTSEL(bank)	PORT_GP_32_REV(bank, _GP_INOUTSEL, unused)
 #define GP_INDT(bank)	PORT_GP_32_REV(bank, _GP_INDT, unused)
 
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8b3130f22b42..9e03d158f411 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1478,6 +1478,8 @@ module_init(remoteproc_init);
 
 static void __exit remoteproc_exit(void)
 {
+	ida_destroy(&rproc_dev_index);
+
 	rproc_exit_debugfs();
 }
 module_exit(remoteproc_exit);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 9d30809bb407..916af5096f57 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
 	char buf[10];
 	int ret;
 
-	if (count > sizeof(buf))
+	if (count < 1 || count > sizeof(buf))
 		return count;
 
 	ret = copy_from_user(buf, user_buf, count);
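Note: the extra 'count < 1' guard matters because handlers like this typically index buf[count - 1] after the copy (to strip a trailing newline); a zero-length write would otherwise underflow the buffer. A condensed sketch of the guarded pattern (hypothetical standalone helper; error returns simplified relative to the driver, which returns count):

	static ssize_t parse_small_write(const char __user *user_buf, size_t count)
	{
		char buf[10];

		if (count < 1 || count > sizeof(buf))
			return -EINVAL;
		if (copy_from_user(buf, user_buf, count))
			return -EFAULT;
		buf[count - 1] = '\0';	/* count >= 1, so this stays in bounds */
		return count;
	}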
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 188006c55ce0..aa705bb4748c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -15,9 +15,6 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/pm_wakeirq.h>
 #include <linux/rtc/ds1307.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
@@ -117,7 +114,6 @@ struct ds1307 {
 #define HAS_ALARM	1		/* bit 1 == irq claimed */
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
-	int			wakeirq;
 	s32 (*read_block_data)(const struct i2c_client *client, u8 command,
 			       u8 length, u8 *values);
 	s32 (*write_block_data)(const struct i2c_client *client, u8 command,
@@ -1138,7 +1134,10 @@ read_rtc:
 			bin2bcd(tmp));
 	}
 
-	device_set_wakeup_capable(&client->dev, want_irq);
+	if (want_irq) {
+		device_set_wakeup_capable(&client->dev, true);
+		set_bit(HAS_ALARM, &ds1307->flags);
+	}
 	ds1307->rtc = devm_rtc_device_register(&client->dev, client->name,
 				rtc_ops, THIS_MODULE);
 	if (IS_ERR(ds1307->rtc)) {
@@ -1146,43 +1145,19 @@ read_rtc:
 	}
 
 	if (want_irq) {
-		struct device_node *node = client->dev.of_node;
-
 		err = devm_request_threaded_irq(&client->dev,
 				client->irq, NULL, irq_handler,
 				IRQF_SHARED | IRQF_ONESHOT,
 				ds1307->rtc->name, client);
 		if (err) {
 			client->irq = 0;
+			device_set_wakeup_capable(&client->dev, false);
+			clear_bit(HAS_ALARM, &ds1307->flags);
 			dev_err(&client->dev, "unable to request IRQ!\n");
-			goto no_irq;
-		}
-
-		set_bit(HAS_ALARM, &ds1307->flags);
-		dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
-
-		/* Currently supported by OF code only! */
-		if (!node)
-			goto no_irq;
-
-		err = of_irq_get(node, 1);
-		if (err <= 0) {
-			if (err == -EPROBE_DEFER)
-				goto exit;
-			goto no_irq;
-		}
-		ds1307->wakeirq = err;
-
-		err = dev_pm_set_dedicated_wake_irq(&client->dev,
-						    ds1307->wakeirq);
-		if (err) {
-			dev_err(&client->dev, "unable to setup wakeIRQ %d!\n",
-				err);
-			goto exit;
-		}
+		} else
+			dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
 	}
 
-no_irq:
 	if (chip->nvram_size) {
 
 		ds1307->nvram = devm_kzalloc(&client->dev,
@@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client)
 {
 	struct ds1307 *ds1307 = i2c_get_clientdata(client);
 
-	if (ds1307->wakeirq)
-		dev_pm_clear_wake_irq(&client->dev);
-
 	if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
 		sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
 
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 548a18916a31..a831d18596a5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void)
 	free_page((unsigned long)sei_page);
 }
 
-int chsc_enable_facility(int operation_code)
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
 {
-	unsigned long flags;
 	int ret;
-	struct {
-		struct chsc_header request;
-		u8 reserved1:4;
-		u8 format:4;
-		u8 reserved2;
-		u16 operation_code;
-		u32 reserved3;
-		u32 reserved4;
-		u32 operation_data_area[252];
-		struct chsc_header response;
-		u32 reserved5:4;
-		u32 format2:4;
-		u32 reserved6:24;
-	} __attribute__ ((packed)) *sda_area;
 
-	spin_lock_irqsave(&chsc_page_lock, flags);
-	memset(chsc_page, 0, PAGE_SIZE);
-	sda_area = chsc_page;
 	sda_area->request.length = 0x0400;
 	sda_area->request.code = 0x0031;
 	sda_area->operation_code = operation_code;
@@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code)
1119 default: 1101 default:
1120 ret = chsc_error_from_response(sda_area->response.code); 1102 ret = chsc_error_from_response(sda_area->response.code);
1121 } 1103 }
1104out:
1105 return ret;
1106}
1107
1108int chsc_enable_facility(int operation_code)
1109{
1110 struct chsc_sda_area *sda_area;
1111 unsigned long flags;
1112 int ret;
1113
1114 spin_lock_irqsave(&chsc_page_lock, flags);
1115 memset(chsc_page, 0, PAGE_SIZE);
1116 sda_area = chsc_page;
1117
1118 ret = __chsc_enable_facility(sda_area, operation_code);
1122 if (ret != 0) 1119 if (ret != 0)
1123 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 1120 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
1124 operation_code, sda_area->response.code); 1121 operation_code, sda_area->response.code);
1125out: 1122
1126 spin_unlock_irqrestore(&chsc_page_lock, flags); 1123 spin_unlock_irqrestore(&chsc_page_lock, flags);
1127 return ret; 1124 return ret;
1128} 1125}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 76c9b50700b2..0de134c3a204 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -115,6 +115,20 @@ struct chsc_scpd {
115 u8 data[PAGE_SIZE - 20]; 115 u8 data[PAGE_SIZE - 20];
116} __attribute__ ((packed)); 116} __attribute__ ((packed));
117 117
118struct chsc_sda_area {
119 struct chsc_header request;
120 u8 :4;
121 u8 format:4;
122 u8 :8;
123 u16 operation_code;
124 u32 :32;
125 u32 :32;
126 u32 operation_data_area[252];
127 struct chsc_header response;
128 u32 :4;
129 u32 format2:4;
130 u32 :24;
131} __packed __aligned(PAGE_SIZE);
118 132
119extern int chsc_get_ssd_info(struct subchannel_id schid, 133extern int chsc_get_ssd_info(struct subchannel_id schid,
120 struct chsc_ssd_info *ssd); 134 struct chsc_ssd_info *ssd);
@@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void);
122extern int chsc_init(void); 136extern int chsc_init(void);
123extern void chsc_init_cleanup(void); 137extern void chsc_init_cleanup(void);
124 138
139int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
125extern int chsc_enable_facility(int); 140extern int chsc_enable_facility(int);
126struct channel_subsystem; 141struct channel_subsystem;
127extern int chsc_secm(struct channel_subsystem *, int); 142extern int chsc_secm(struct channel_subsystem *, int);
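The new chsc_sda_area above replaces the old on-stack struct's reserved1..reserved6 members with unnamed bit-fields, which reserve space without introducing names. A hypothetical userspace mock of that layout trick, with GCC attributes standing in for the kernel's __packed/__aligned macros and 4096 for PAGE_SIZE:

    #include <stdio.h>

    struct sda_area_demo {
            unsigned short request_len;     /* stands in for struct chsc_header */
            unsigned short request_code;
            unsigned char :4;               /* reserved: unnamed, still takes space */
            unsigned char format:4;
            unsigned char :8;               /* reserved byte */
            unsigned short operation_code;
            unsigned int :32;               /* two reserved words */
            unsigned int :32;
    } __attribute__((packed, aligned(4096)));

    int main(void)
    {
            /* The unnamed fields keep every offset fixed, and aligned()
             * pads the struct size up to the alignment boundary. */
            printf("sizeof=%zu alignof=%zu\n",
                   sizeof(struct sda_area_demo),
                   (size_t)__alignof__(struct sda_area_demo));
            return 0;
    }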
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b5620e818d6b..690b8547e828 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
925 925
926int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) 926int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
927{ 927{
928 static struct chsc_sda_area sda_area __initdata;
928 struct subchannel_id schid; 929 struct subchannel_id schid;
929 struct schib schib; 930 struct schib schib;
930 931
931 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; 932 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
932 if (!schid.one) 933 if (!schid.one)
933 return -ENODEV; 934 return -ENODEV;
935
936 if (schid.ssid) {
937 /*
938 * Firmware should have already enabled MSS but whoever started
939 * the kernel might have initiated a channel subsystem reset.
940 * Ensure that MSS is enabled.
941 */
942 memset(&sda_area, 0, sizeof(sda_area));
943 if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
944 return -ENODEV;
945 }
934 if (stsch_err(schid, &schib)) 946 if (stsch_err(schid, &schib))
935 return -ENODEV; 947 return -ENODEV;
936 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 948 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
937 return -ENODEV; 949 return -ENODEV;
938 if (!schib.pmcw.dnv) 950 if (!schib.pmcw.dnv)
939 return -ENODEV; 951 return -ENODEV;
952
953 iplinfo->ssid = schid.ssid;
940 iplinfo->devno = schib.pmcw.dev; 954 iplinfo->devno = schib.pmcw.dev;
941 iplinfo->is_qdio = schib.pmcw.qf; 955 iplinfo->is_qdio = schib.pmcw.qf;
942 return 0; 956 return 0;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2ee3053bdc12..489e703dc82d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
702 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 702 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
703 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 703 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
704 } else { 704 } else {
705#ifdef CONFIG_SMP
706 css->global_pgid.pgid_high.cpu_addr = stap(); 705 css->global_pgid.pgid_high.cpu_addr = stap();
707#else
708 css->global_pgid.pgid_high.cpu_addr = 0;
709#endif
710 } 706 }
711 get_cpu_id(&cpu_id); 707 get_cpu_id(&cpu_id);
712 css->global_pgid.cpu_id = cpu_id.ident; 708 css->global_pgid.cpu_id = cpu_id.ident;
713 css->global_pgid.cpu_model = cpu_id.machine; 709 css->global_pgid.cpu_model = cpu_id.machine;
714 css->global_pgid.tod_high = tod_high; 710 css->global_pgid.tod_high = tod_high;
715
716} 711}
717 712
718static void 713static void
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 57f710b3c8a4..b8ab18676e69 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,9 @@
3# 3#
4 4
5ap-objs := ap_bus.o 5ap-objs := ap_bus.o
6obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o 6# zcrypt_api depends on ap
7obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o 7obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
8# msgtype* depend on zcrypt_api
8obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o 9obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
10# adapter drivers depend on ap, zcrypt_api and msgtype*
11obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9cb3dfbcaddb..61f768518a34 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -74,6 +74,7 @@ static struct device *ap_root_device = NULL;
74static struct ap_config_info *ap_configuration; 74static struct ap_config_info *ap_configuration;
75static DEFINE_SPINLOCK(ap_device_list_lock); 75static DEFINE_SPINLOCK(ap_device_list_lock);
76static LIST_HEAD(ap_device_list); 76static LIST_HEAD(ap_device_list);
77static bool initialised;
77 78
78/* 79/*
79 * Workqueue timer for bus rescan. 80 * Workqueue timer for bus rescan.
@@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
1384{ 1385{
1385 struct device_driver *drv = &ap_drv->driver; 1386 struct device_driver *drv = &ap_drv->driver;
1386 1387
1388 if (!initialised)
1389 return -ENODEV;
1390
1387 drv->bus = &ap_bus_type; 1391 drv->bus = &ap_bus_type;
1388 drv->probe = ap_device_probe; 1392 drv->probe = ap_device_probe;
1389 drv->remove = ap_device_remove; 1393 drv->remove = ap_device_remove;
@@ -1808,6 +1812,7 @@ int __init ap_module_init(void)
1808 goto out_pm; 1812 goto out_pm;
1809 1813
1810 queue_work(system_long_wq, &ap_scan_work); 1814 queue_work(system_long_wq, &ap_scan_work);
1815 initialised = true;
1811 1816
1812 return 0; 1817 return 0;
1813 1818
@@ -1837,6 +1842,7 @@ void ap_module_exit(void)
1837{ 1842{
1838 int i; 1843 int i;
1839 1844
1845 initialised = false;
1840 ap_reset_domain(); 1846 ap_reset_domain();
1841 ap_poll_thread_stop(); 1847 ap_poll_thread_stop();
1842 del_timer_sync(&ap_config_timer); 1848 del_timer_sync(&ap_config_timer);
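Condensed from the ap_bus hunks above: a module-lifetime flag closes the window where a driver could register against a bus that is not (or no longer) initialised. The elided parts are the driver's normal setup and teardown.

    static bool initialised;

    int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
                           char *name)
    {
            /* Refuse registration before init completes or after exit
             * has begun; the bus infrastructure is unusable then. */
            if (!initialised)
                    return -ENODEV;

            /* ... set up drv->bus and the probe/remove hooks ... */
            return driver_register(&ap_drv->driver);
    }

    int __init ap_module_init(void)
    {
            /* ... register the bus, scan devices, start the workqueue ... */
            initialised = true;     /* last step: open the door */
            return 0;
    }

    void ap_module_exit(void)
    {
            initialised = false;    /* first step: close the door */
            /* ... tear everything down ... */
    }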
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a9603ebbc1f8..9f8fa42c062c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister);
317 317
318void zcrypt_msgtype_register(struct zcrypt_ops *zops) 318void zcrypt_msgtype_register(struct zcrypt_ops *zops)
319{ 319{
320 if (zops->owner) { 320 spin_lock_bh(&zcrypt_ops_list_lock);
321 spin_lock_bh(&zcrypt_ops_list_lock); 321 list_add_tail(&zops->list, &zcrypt_ops_list);
322 list_add_tail(&zops->list, &zcrypt_ops_list); 322 spin_unlock_bh(&zcrypt_ops_list_lock);
323 spin_unlock_bh(&zcrypt_ops_list_lock);
324 }
325} 323}
326EXPORT_SYMBOL(zcrypt_msgtype_register); 324EXPORT_SYMBOL(zcrypt_msgtype_register);
327 325
@@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
342 spin_lock_bh(&zcrypt_ops_list_lock); 340 spin_lock_bh(&zcrypt_ops_list_lock);
343 list_for_each_entry(zops, &zcrypt_ops_list, list) { 341 list_for_each_entry(zops, &zcrypt_ops_list, list) {
344 if ((zops->variant == variant) && 342 if ((zops->variant == variant) &&
345 (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) { 343 (!strncmp(zops->name, name, sizeof(zops->name)))) {
346 found = 1; 344 found = 1;
347 break; 345 break;
348 } 346 }
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 750876891931..38618f05ad92 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -96,6 +96,7 @@ struct zcrypt_ops {
96 struct list_head list; /* zcrypt ops list. */ 96 struct list_head list; /* zcrypt ops list. */
97 struct module *owner; 97 struct module *owner;
98 int variant; 98 int variant;
99 char name[128];
99}; 100};
100 101
101struct zcrypt_device { 102struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 71ceee9137a8..74edf2934e7c 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
513 .rsa_modexpo = zcrypt_cex2a_modexpo, 513 .rsa_modexpo = zcrypt_cex2a_modexpo,
514 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, 514 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
515 .owner = THIS_MODULE, 515 .owner = THIS_MODULE,
516 .name = MSGTYPE50_NAME,
516 .variant = MSGTYPE50_VARIANT_DEFAULT, 517 .variant = MSGTYPE50_VARIANT_DEFAULT,
517}; 518};
518 519
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 74762214193b..9a2dd472c1cc 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
1119 */ 1119 */
1120static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { 1120static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
1121 .owner = THIS_MODULE, 1121 .owner = THIS_MODULE,
1122 .name = MSGTYPE06_NAME,
1122 .variant = MSGTYPE06_VARIANT_NORNG, 1123 .variant = MSGTYPE06_VARIANT_NORNG,
1123 .rsa_modexpo = zcrypt_msgtype6_modexpo, 1124 .rsa_modexpo = zcrypt_msgtype6_modexpo,
1124 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, 1125 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
1127 1128
1128static struct zcrypt_ops zcrypt_msgtype6_ops = { 1129static struct zcrypt_ops zcrypt_msgtype6_ops = {
1129 .owner = THIS_MODULE, 1130 .owner = THIS_MODULE,
1131 .name = MSGTYPE06_NAME,
1130 .variant = MSGTYPE06_VARIANT_DEFAULT, 1132 .variant = MSGTYPE06_VARIANT_DEFAULT,
1131 .rsa_modexpo = zcrypt_msgtype6_modexpo, 1133 .rsa_modexpo = zcrypt_msgtype6_modexpo,
1132 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, 1134 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
1136 1138
1137static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { 1139static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
1138 .owner = THIS_MODULE, 1140 .owner = THIS_MODULE,
1141 .name = MSGTYPE06_NAME,
1139 .variant = MSGTYPE06_VARIANT_EP11, 1142 .variant = MSGTYPE06_VARIANT_EP11,
1140 .rsa_modexpo = NULL, 1143 .rsa_modexpo = NULL,
1141 .rsa_modexpo_crt = NULL, 1144 .rsa_modexpo_crt = NULL,
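The name[128] field added above lets __ops_lookup() match on a string the ops structure itself owns instead of chasing zops->owner->name through the module structure. A small userspace model of that lookup; the names and variants are made up:

    #include <stdio.h>
    #include <string.h>

    struct demo_ops {
            char name[128];         /* embedded copy, like zcrypt_ops.name */
            int variant;
    };

    static struct demo_ops ops_list[] = {
            { .name = "msgtype50", .variant = 0 },
            { .name = "msgtype06", .variant = 1 },
    };

    static struct demo_ops *ops_lookup(const char *name, int variant)
    {
            size_t i;

            for (i = 0; i < sizeof(ops_list) / sizeof(ops_list[0]); i++) {
                    struct demo_ops *zops = &ops_list[i];

                    /* Compare the embedded copy, as the patched
                     * __ops_lookup() does with zops->name. */
                    if (zops->variant == variant &&
                        !strncmp(zops->name, name, sizeof(zops->name)))
                            return zops;
            }
            return NULL;
    }

    int main(void)
    {
            struct demo_ops *found = ops_lookup("msgtype06", 1);

            printf("%s\n", found ? found->name : "not found");
            return 0;
    }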
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 5f692ae40749..64eed87d34a8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -364,6 +364,7 @@ config SCSI_HPSA
364 tristate "HP Smart Array SCSI driver" 364 tristate "HP Smart Array SCSI driver"
365 depends on PCI && SCSI 365 depends on PCI && SCSI
366 select CHECK_SIGNATURE 366 select CHECK_SIGNATURE
367 select SCSI_SAS_ATTRS
367 help 368 help
368 This driver supports HP Smart Array Controllers (circa 2009). 369 This driver supports HP Smart Array Controllers (circa 2009).
369 It is a SCSI alternative to the cciss driver, which is a block 370 It is a SCSI alternative to the cciss driver, which is a block
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS
499 tristate "AdvanSys SCSI support" 500 tristate "AdvanSys SCSI support"
500 depends on SCSI 501 depends on SCSI
501 depends on ISA || EISA || PCI 502 depends on ISA || EISA || PCI
503 depends on ISA_DMA_API || !ISA
502 help 504 help
503 This is a driver for all SCSI host adapters manufactured by 505 This is a driver for all SCSI host adapters manufactured by
504 AdvanSys. It is documented in the kernel source in 506 AdvanSys. It is documented in the kernel source in
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 519f9a4b3dad..febbd83e2ecd 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7803 return ASC_BUSY; 7803 return ASC_BUSY;
7804 } 7804 }
7805 scsiqp->sense_addr = cpu_to_le32(sense_addr); 7805 scsiqp->sense_addr = cpu_to_le32(sense_addr);
7806 scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); 7806 scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
7807 7807
7808 /* Build ADV_SCSI_REQ_Q */ 7808 /* Build ADV_SCSI_REQ_Q */
7809 7809
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 323982fd00c3..82ac1cd818ac 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev)
333 kfree(queuedata); 333 kfree(queuedata);
334 } 334 }
335 335
336 if (shost->shost_state == SHOST_CREATED) {
337 /*
338 * Free the shost_dev device name here if scsi_host_alloc()
339 * and scsi_host_put() have been called but neither
340 * scsi_host_add() nor scsi_host_remove() has been called.
 341 * This avoids leaking the memory allocated for the
 342 * shost_dev name.
343 */
344 kfree(dev_name(&shost->shost_dev));
345 }
346
336 scsi_destroy_command_freelist(shost); 347 scsi_destroy_command_freelist(shost);
337 if (shost_use_blk_mq(shost)) { 348 if (shost_use_blk_mq(shost)) {
338 if (shost->tag_set.tags) 349 if (shost->tag_set.tags)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6a8f95808ee0..a3860367b568 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8671 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8671 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8672 goto errout; 8672 goto errout;
8673 8673
8674 if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) 8674 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8675 goto out; 8675 goto out;
8676 8676
8677errout: 8677errout:
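The one-character hpsa fix above ('&&' becomes '&') is easy to gloss over. This tiny standalone program shows why the logical form is wrong; the flag value used here is illustrative, not the driver's actual constant:

    #include <stdio.h>

    #define DIAG_OPTS_DISABLE_RLD_CACHING 0x40000000u  /* illustrative value */

    int main(void)
    {
            unsigned int options = 0x1;     /* flag NOT set */

            /* Buggy form: '&&' tests both operands for truthiness, so any
             * nonzero options value "matches" regardless of the flag bit. */
            if (options && DIAG_OPTS_DISABLE_RLD_CACHING)
                    printf("&&: taken (wrong, the flag is not set)\n");

            /* Fixed form: '&' isolates the flag bit itself. */
            if (options & DIAG_OPTS_DISABLE_RLD_CACHING)
                    printf("&: taken\n");
            else
                    printf("&: not taken (correct)\n");

            return 0;
    }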
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 29061467cc17..b736dbc80485 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE
 71 MAX_PHYS_SEGMENTS in most kernels. However, in SuSE kernels this 71 MAX_PHYS_SEGMENTS in most kernels. However, in SuSE kernels this
 72 can be 256. However, it may be decreased down to 16. Decreasing this 72 can be 256. However, it may be decreased down to 16. Decreasing this
 73 parameter will reduce memory requirements per controller instance. 73 parameter will reduce memory requirements per controller instance.
74
75config SCSI_MPT2SAS
76 tristate "Legacy MPT2SAS config option"
77 default n
78 select SCSI_MPT3SAS
79 depends on PCI && SCSI
80 ---help---
 81 Dummy config option for backwards compatibility: configure the MPT3SAS
82 driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d95206b7e116..9ab77b06434d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3905 * We do not expose raid functionality to upper layer for warpdrive. 3905 * We do not expose raid functionality to upper layer for warpdrive.
3906 */ 3906 */
3907 if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) 3907 if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
3908 && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && 3908 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
3909 scmd->cmd_len != 32)
3910 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 3909 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
3911 3910
3912 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 3911 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90fdf0e859e3..675e7fab0796 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
758 struct device_attribute *attr, 758 struct device_attribute *attr,
759 const char *buffer, size_t size) 759 const char *buffer, size_t size)
760{ 760{
761 int val = 0; 761 unsigned int val = 0;
762 struct mvs_info *mvi = NULL; 762 struct mvs_info *mvi = NULL;
763 struct Scsi_Host *shost = class_to_shost(cdev); 763 struct Scsi_Host *shost = class_to_shost(cdev);
764 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 764 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
766 if (buffer == NULL) 766 if (buffer == NULL)
767 return size; 767 return size;
768 768
769 if (sscanf(buffer, "%d", &val) != 1) 769 if (sscanf(buffer, "%u", &val) != 1)
770 return -EINVAL; 770 return -EINVAL;
771 771
772 if (val >= 0x10000) { 772 if (val >= 0x10000) {
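Why the mvsas hunk switches both the variable and the sscanf() format to unsigned: with %d a negative value parses fine and slips past the upper-bound check, while %u wraps it to a huge value the check rejects. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            int sval = 0;
            unsigned int uval = 0;

            /* With "%d", "-1" parses successfully and -1 >= 0x10000 is
             * false, so the old range check let a negative value through. */
            if (sscanf("-1", "%d", &sval) == 1 && !(sval >= 0x10000))
                    printf("%%d: -1 slips past the check (sval=%d)\n", sval);

            /* With "%u", "-1" wraps to UINT_MAX, which the check rejects. */
            if (sscanf("-1", "%u", &uval) == 1 && uval >= 0x10000)
                    printf("%%u: rejected (uval=%u)\n", uval);

            return 0;
    }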
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index eb0cc5475c45..b6b4cfdd7620 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
433 if (off_in < QLA82XX_PCI_CRBSPACE) 433 if (off_in < QLA82XX_PCI_CRBSPACE)
434 return -1; 434 return -1;
435 435
436 *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE); 436 off_in -= QLA82XX_PCI_CRBSPACE;
437 437
438 /* Try direct map */ 438 /* Try direct map */
439 m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; 439 m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
443 return 0; 443 return 0;
444 } 444 }
445 /* Not in direct map, use crb window */ 445 /* Not in direct map, use crb window */
446 *off_out = (void __iomem *)off_in;
446 return 1; 447 return 1;
447} 448}
448 449
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3ba2e9564b9a..81af294f15a7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
902 return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); 902 return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
903} 903}
904 904
905CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable); 905CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
906CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); 906CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
907CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); 907CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
908 908
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index dfcc45bb03b1..d09d60293c27 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
465 0} }, 465 0} },
466 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ 466 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
467 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 467 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
468 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */ 468 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
469 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 469 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
470 0, 0, 0, 0, 0, 0} },
470 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, 471 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
471 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, 472 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ 473 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
477 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 478 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
478 0} }, 479 0} },
479/* 20 */ 480/* 20 */
480 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */ 481 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
481 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 482 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ 483 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
483 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 484 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ 485 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 83245391e956..054923e3393c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
701 * strings. 701 * strings.
702 */ 702 */
703 if (sdev->inquiry_len < 36) { 703 if (sdev->inquiry_len < 36) {
704 sdev_printk(KERN_INFO, sdev, 704 if (!sdev->host->short_inquiry) {
705 "scsi scan: INQUIRY result too short (%d)," 705 shost_printk(KERN_INFO, sdev->host,
706 " using 36\n", sdev->inquiry_len); 706 "scsi scan: INQUIRY result too short (%d),"
707 " using 36\n", sdev->inquiry_len);
708 sdev->host->short_inquiry = 1;
709 }
707 sdev->inquiry_len = 36; 710 sdev->inquiry_len = 36;
708 } 711 }
709 712
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8d2312239ae0..21930c9ac9cd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev)
1102{ 1102{
1103 struct device *dev = &sdev->sdev_gendev; 1103 struct device *dev = &sdev->sdev_gendev;
1104 1104
1105 /*
 1106 * This cleanup path is not reentrant, and while it is impossible
 1107 * to get a new reference with scsi_device_get(), someone can still
1108 * hold a previously acquired one.
1109 */
1110 if (sdev->sdev_state == SDEV_DEL)
1111 return;
1112
1105 if (sdev->is_visible) { 1113 if (sdev->is_visible) {
1106 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 1114 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
1107 return; 1115 return;
@@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev)
1110 device_unregister(&sdev->sdev_dev); 1118 device_unregister(&sdev->sdev_dev);
1111 transport_remove_device(dev); 1119 transport_remove_device(dev);
1112 scsi_dh_remove_device(sdev); 1120 scsi_dh_remove_device(sdev);
1113 } 1121 device_del(dev);
1122 } else
1123 put_device(&sdev->sdev_dev);
1114 1124
1115 /* 1125 /*
1116 * Stop accepting new requests and wait until all queuecommand() and 1126 * Stop accepting new requests and wait until all queuecommand() and
@@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
1121 blk_cleanup_queue(sdev->request_queue); 1131 blk_cleanup_queue(sdev->request_queue);
1122 cancel_work_sync(&sdev->requeue_work); 1132 cancel_work_sync(&sdev->requeue_work);
1123 1133
1124 /*
 1125 * Remove the device after blk_cleanup_queue() has been called so that
1126 * a possible bdi_register() call with the same name occurs after
1127 * blk_cleanup_queue() has called bdi_destroy().
1128 */
1129 if (sdev->is_visible)
1130 device_del(dev);
1131 else
1132 put_device(&sdev->sdev_dev);
1133
1134 if (sdev->host->hostt->slave_destroy) 1134 if (sdev->host->hostt->slave_destroy)
1135 sdev->host->hostt->slave_destroy(sdev); 1135 sdev->host->hostt->slave_destroy(sdev);
1136 transport_destroy_device(dev); 1136 transport_destroy_device(dev);
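The __scsi_remove_device() hunks above make teardown idempotent and move device_del()/put_device() next to the rest of the visibility handling. Condensed, the guard pattern is:

    void __scsi_remove_device(struct scsi_device *sdev)
    {
            /*
             * Cleanup must run at most once: a holder of a previously
             * acquired reference can re-enter, so bail out once the
             * device has already reached SDEV_DEL.
             */
            if (sdev->sdev_state == SDEV_DEL)
                    return;

            /* ... unregister, clean up the queue, slave_destroy ... */
    }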
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 54519804c46a..3d22fc3e3c1a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
638 unsigned int max_blocks = 0; 638 unsigned int max_blocks = 0;
639 639
640 q->limits.discard_zeroes_data = 0; 640 q->limits.discard_zeroes_data = 0;
641 q->limits.discard_alignment = sdkp->unmap_alignment * 641
642 logical_block_size; 642 /*
643 q->limits.discard_granularity = 643 * When LBPRZ is reported, discard alignment and granularity
644 max(sdkp->physical_block_size, 644 * must be fixed to the logical block size. Otherwise the block
645 sdkp->unmap_granularity * logical_block_size); 645 * layer will drop misaligned portions of the request which can
646 * lead to data corruption. If LBPRZ is not set, we honor the
647 * device preference.
648 */
649 if (sdkp->lbprz) {
650 q->limits.discard_alignment = 0;
651 q->limits.discard_granularity = 1;
652 } else {
653 q->limits.discard_alignment = sdkp->unmap_alignment *
654 logical_block_size;
655 q->limits.discard_granularity =
656 max(sdkp->physical_block_size,
657 sdkp->unmap_granularity * logical_block_size);
658 }
646 659
647 sdkp->provisioning_mode = mode; 660 sdkp->provisioning_mode = mode;
648 661
@@ -2321,11 +2334,8 @@ got_data:
2321 } 2334 }
2322 } 2335 }
2323 2336
2324 if (sdkp->capacity > 0xffffffff) { 2337 if (sdkp->capacity > 0xffffffff)
2325 sdp->use_16_for_rw = 1; 2338 sdp->use_16_for_rw = 1;
2326 sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
2327 } else
2328 sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
2329 2339
2330 /* Rescale capacity to 512-byte units */ 2340 /* Rescale capacity to 512-byte units */
2331 if (sector_size == 4096) 2341 if (sector_size == 4096)
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2642{ 2652{
2643 unsigned int sector_sz = sdkp->device->sector_size; 2653 unsigned int sector_sz = sdkp->device->sector_size;
2644 const int vpd_len = 64; 2654 const int vpd_len = 64;
2645 u32 max_xfer_length;
2646 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 2655 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2647 2656
2648 if (!buffer || 2657 if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2650 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) 2659 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2651 goto out; 2660 goto out;
2652 2661
2653 max_xfer_length = get_unaligned_be32(&buffer[8]);
2654 if (max_xfer_length)
2655 sdkp->max_xfer_blocks = max_xfer_length;
2656
2657 blk_queue_io_min(sdkp->disk->queue, 2662 blk_queue_io_min(sdkp->disk->queue,
2658 get_unaligned_be16(&buffer[6]) * sector_sz); 2663 get_unaligned_be16(&buffer[6]) * sector_sz);
2659 blk_queue_io_opt(sdkp->disk->queue, 2664
2660 get_unaligned_be32(&buffer[12]) * sector_sz); 2665 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
2666 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
2661 2667
2662 if (buffer[3] == 0x3c) { 2668 if (buffer[3] == 0x3c) {
2663 unsigned int lba_count, desc_count; 2669 unsigned int lba_count, desc_count;
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
2806 return 0; 2812 return 0;
2807} 2813}
2808 2814
2815static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
2816{
2817 return blocks << (ilog2(sdev->sector_size) - 9);
2818}
2819
2809/** 2820/**
2810 * sd_revalidate_disk - called the first time a new disk is seen, 2821 * sd_revalidate_disk - called the first time a new disk is seen,
2811 * performs disk spin up, read_capacity, etc. 2822 * performs disk spin up, read_capacity, etc.
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
2815{ 2826{
2816 struct scsi_disk *sdkp = scsi_disk(disk); 2827 struct scsi_disk *sdkp = scsi_disk(disk);
2817 struct scsi_device *sdp = sdkp->device; 2828 struct scsi_device *sdp = sdkp->device;
2829 struct request_queue *q = sdkp->disk->queue;
2818 unsigned char *buffer; 2830 unsigned char *buffer;
2819 unsigned int max_xfer; 2831 unsigned int dev_max, rw_max;
2820 2832
2821 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 2833 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
2822 "sd_revalidate_disk\n")); 2834 "sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
2864 */ 2876 */
2865 sd_set_flush_flag(sdkp); 2877 sd_set_flush_flag(sdkp);
2866 2878
2867 max_xfer = sdkp->max_xfer_blocks; 2879 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
2868 max_xfer <<= ilog2(sdp->sector_size) - 9; 2880 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
2881
2882 /* Some devices report a maximum block count for READ/WRITE requests. */
2883 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
2884 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
2885
2886 /*
2887 * Use the device's preferred I/O size for reads and writes
2888 * unless the reported value is unreasonably large (or garbage).
2889 */
2890 if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
2891 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
2892 rw_max = q->limits.io_opt =
2893 logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2894 else
2895 rw_max = BLK_DEF_MAX_SECTORS;
2869 2896
2870 sdkp->disk->queue->limits.max_sectors = 2897 /* Combine with controller limits */
2871 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); 2898 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
2872 2899
2873 set_capacity(disk, sdkp->capacity); 2900 set_capacity(disk, sdkp->capacity);
2874 sd_config_write_same(sdkp); 2901 sd_config_write_same(sdkp);
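A userspace model of the new sd.c limit calculation: logical_to_sectors() converts device logical blocks to 512-byte sectors, the CDB-imposed ceiling is combined with the VPD-reported maximum via min_not_zero(), and the preferred I/O size is honored only when it is sane. SD_DEF_XFER_BLOCKS, BLK_DEF_MAX_SECTORS and the VPD numbers below are illustrative stand-ins, not the kernel's authoritative values.

    #include <stdio.h>

    #define SD_DEF_XFER_BLOCKS 0xffffu      /* illustrative */
    #define BLK_DEF_MAX_SECTORS 2560u       /* illustrative */

    static unsigned int ilog2_u32(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    /* Device logical blocks -> 512-byte sectors, as in the sd.c helper. */
    static unsigned int logical_to_sectors(unsigned int sector_size,
                                           unsigned int blocks)
    {
            return blocks << (ilog2_u32(sector_size) - 9);
    }

    /* Smaller nonzero of the two, mirroring the kernel's min_not_zero(). */
    static unsigned int min_not_zero(unsigned int a, unsigned int b)
    {
            return (a && (!b || a < b)) ? a : b;
    }

    int main(void)
    {
            unsigned int sector_size = 4096;
            unsigned int dev_max = SD_DEF_XFER_BLOCKS;  /* CDB field limit */
            unsigned int max_xfer_blocks = 0x2000;      /* from VPD page 0xb0 */
            unsigned int opt_xfer_blocks = 0x400;       /* from VPD page 0xb0 */
            unsigned int rw_max;

            dev_max = min_not_zero(dev_max, max_xfer_blocks);

            /* Honor the preferred I/O size only when it is reasonable. */
            if (opt_xfer_blocks && opt_xfer_blocks <= dev_max &&
                opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
                    rw_max = logical_to_sectors(sector_size, opt_xfer_blocks);
            else
                    rw_max = BLK_DEF_MAX_SECTORS;

            printf("dev_max=%u blocks, rw_max=%u sectors\n", dev_max, rw_max);
            return 0;
    }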
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 63ba5ca7f9a1..5f2a84aff29f 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,6 +67,7 @@ struct scsi_disk {
67 atomic_t openers; 67 atomic_t openers;
68 sector_t capacity; /* size in 512-byte sectors */ 68 sector_t capacity; /* size in 512-byte sectors */
69 u32 max_xfer_blocks; 69 u32 max_xfer_blocks;
70 u32 opt_xfer_blocks;
70 u32 max_ws_blocks; 71 u32 max_ws_blocks;
71 u32 max_unmap_blocks; 72 u32 max_unmap_blocks;
72 u32 unmap_granularity; 73 u32 unmap_granularity;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e0a1e52a04e7..2e522951b619 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4083 } 4083 }
4084 cdev->owner = THIS_MODULE; 4084 cdev->owner = THIS_MODULE;
4085 cdev->ops = &st_fops; 4085 cdev->ops = &st_fops;
4086 STm->cdevs[rew] = cdev;
4086 4087
4087 error = cdev_add(cdev, cdev_devno, 1); 4088 error = cdev_add(cdev, cdev_devno, 1);
4088 if (error) { 4089 if (error) {
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4091 pr_err("st%d: Device not attached.\n", dev_num); 4092 pr_err("st%d: Device not attached.\n", dev_num);
4092 goto out_free; 4093 goto out_free;
4093 } 4094 }
4094 STm->cdevs[rew] = cdev;
4095 4095
4096 i = mode << (4 - ST_NBR_MODE_BITS); 4096 i = mode << (4 - ST_NBR_MODE_BITS);
4097 snprintf(name, 10, "%s%s%s", rew ? "n" : "", 4097 snprintf(name, 10, "%s%s%s", rew ? "n" : "",
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4110 return 0; 4110 return 0;
4111out_free: 4111out_free:
4112 cdev_del(STm->cdevs[rew]); 4112 cdev_del(STm->cdevs[rew]);
4113 STm->cdevs[rew] = NULL;
4114out: 4113out:
4114 STm->cdevs[rew] = NULL;
4115 STm->devs[rew] = NULL;
4115 return error; 4116 return error;
4116} 4117}
4117 4118
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 25abd4eb7d10..91a003011acf 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
34 34
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
38 if (!of_find_compatible_node(NULL, NULL, 38 if (!of_find_compatible_node(NULL, NULL,
39 "renesas,cpg-mstp-clocks")) 39 "renesas,cpg-mstp-clocks"))
40 return 0; 40 return 0;
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 9d5068248aa0..0a4ea809a61b 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,6 +23,7 @@ config MTK_PMIC_WRAP
23config MTK_SCPSYS 23config MTK_SCPSYS
24 bool "MediaTek SCPSYS Support" 24 bool "MediaTek SCPSYS Support"
25 depends on ARCH_MEDIATEK || COMPILE_TEST 25 depends on ARCH_MEDIATEK || COMPILE_TEST
26 default ARM64 && ARCH_MEDIATEK
26 select REGMAP 27 select REGMAP
27 select MTK_INFRACFG 28 select MTK_INFRACFG
28 select PM_GENERIC_DOMAINS if PM 29 select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index f3a0b6a4b54e..8c03a80b482d 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev)
1179 1179
1180 block++; 1180 block++;
1181 if (!block->size) 1181 if (!block->size)
1182 return 0; 1182 continue;
1183 1183
1184 dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", 1184 dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
1185 block->phys, block->virt, block->size); 1185 block->phys, block->virt, block->size);
@@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev,
1519 1519
1520 for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { 1520 for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1521 if (knav_acc_firmwares[i]) { 1521 if (knav_acc_firmwares[i]) {
1522 ret = request_firmware(&fw, 1522 ret = request_firmware_direct(&fw,
1523 knav_acc_firmwares[i], 1523 knav_acc_firmwares[i],
1524 kdev->dev); 1524 kdev->dev);
1525 if (!ret) { 1525 if (!ret) {
1526 found = true; 1526 found = true;
1527 break; 1527 break;
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 06858e04ec59..bf9a610e5b89 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
562 goto out_clk_disable; 562 goto out_clk_disable;
563 } 563 }
564 564
565 dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", 565 dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
566 r->start, irq, bs->fifo_size); 566 r, irq, bs->fifo_size);
567 567
568 return 0; 568 return 0;
569 569
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 563954a61424..7840067062a8 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -410,7 +410,7 @@ static int mtk_spi_setup(struct spi_device *spi)
410 if (!spi->controller_data) 410 if (!spi->controller_data)
411 spi->controller_data = (void *)&mtk_default_chip_info; 411 spi->controller_data = (void *)&mtk_default_chip_info;
412 412
413 if (mdata->dev_comp->need_pad_sel) 413 if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
414 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 414 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
415 415
416 return 0; 416 return 0;
@@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev)
632 goto err_put_master; 632 goto err_put_master;
633 } 633 }
634 634
635 for (i = 0; i < master->num_chipselect; i++) { 635 if (!master->cs_gpios && master->num_chipselect > 1) {
636 ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], 636 dev_err(&pdev->dev,
637 dev_name(&pdev->dev)); 637 "cs_gpios not specified and num_chipselect > 1\n");
638 if (ret) { 638 ret = -EINVAL;
639 dev_err(&pdev->dev, 639 goto err_put_master;
640 "can't get CS GPIO %i\n", i); 640 }
641 goto err_put_master; 641
642 if (master->cs_gpios) {
643 for (i = 0; i < master->num_chipselect; i++) {
644 ret = devm_gpio_request(&pdev->dev,
645 master->cs_gpios[i],
646 dev_name(&pdev->dev));
647 if (ret) {
648 dev_err(&pdev->dev,
649 "can't get CS GPIO %i\n", i);
650 goto err_put_master;
651 }
642 } 652 }
643 } 653 }
644 } 654 }
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 94af80676684..5e5fd77e2711 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1171,19 +1171,31 @@ err_no_rxchan:
1171static int pl022_dma_autoprobe(struct pl022 *pl022) 1171static int pl022_dma_autoprobe(struct pl022 *pl022)
1172{ 1172{
1173 struct device *dev = &pl022->adev->dev; 1173 struct device *dev = &pl022->adev->dev;
1174 struct dma_chan *chan;
1175 int err;
1174 1176
1175 /* automatically configure DMA channels from platform, normally using DT */ 1177 /* automatically configure DMA channels from platform, normally using DT */
1176 pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx"); 1178 chan = dma_request_slave_channel_reason(dev, "rx");
1177 if (!pl022->dma_rx_channel) 1179 if (IS_ERR(chan)) {
1180 err = PTR_ERR(chan);
1178 goto err_no_rxchan; 1181 goto err_no_rxchan;
1182 }
1183
1184 pl022->dma_rx_channel = chan;
1179 1185
1180 pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx"); 1186 chan = dma_request_slave_channel_reason(dev, "tx");
1181 if (!pl022->dma_tx_channel) 1187 if (IS_ERR(chan)) {
1188 err = PTR_ERR(chan);
1182 goto err_no_txchan; 1189 goto err_no_txchan;
1190 }
1191
1192 pl022->dma_tx_channel = chan;
1183 1193
1184 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1194 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1185 if (!pl022->dummypage) 1195 if (!pl022->dummypage) {
1196 err = -ENOMEM;
1186 goto err_no_dummypage; 1197 goto err_no_dummypage;
1198 }
1187 1199
1188 return 0; 1200 return 0;
1189 1201
@@ -1194,7 +1206,7 @@ err_no_txchan:
1194 dma_release_channel(pl022->dma_rx_channel); 1206 dma_release_channel(pl022->dma_rx_channel);
1195 pl022->dma_rx_channel = NULL; 1207 pl022->dma_rx_channel = NULL;
1196err_no_rxchan: 1208err_no_rxchan:
1197 return -ENODEV; 1209 return err;
1198} 1210}
1199 1211
1200static void terminate_dma(struct pl022 *pl022) 1212static void terminate_dma(struct pl022 *pl022)
@@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
2236 2248
2237 /* Get DMA channels, try autoconfiguration first */ 2249 /* Get DMA channels, try autoconfiguration first */
2238 status = pl022_dma_autoprobe(pl022); 2250 status = pl022_dma_autoprobe(pl022);
2251 if (status == -EPROBE_DEFER) {
2252 dev_dbg(dev, "deferring probe to get DMA channel\n");
2253 goto err_no_irq;
2254 }
2239 2255
2240 /* If that failed, use channels from platform_info */ 2256 /* If that failed, use channels from platform_info */
2241 if (status == 0) 2257 if (status == 0)
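The pl022 change swaps dma_request_slave_channel(), which returns NULL on any failure, for the _reason variant that returns an ERR_PTR() value, so -EPROBE_DEFER can be told apart from a genuine absence of channels and propagated to the caller. (dma_request_slave_channel_reason() was the in-tree name at the time.) Condensed:

    chan = dma_request_slave_channel_reason(dev, "rx");
    if (IS_ERR(chan)) {
            /* Keep the reason code; -EPROBE_DEFER must reach the caller. */
            err = PTR_ERR(chan);
            goto err_no_rxchan;
    }
    pl022->dma_rx_channel = chan;

    /* ... same dance for "tx"; then in probe(): ... */

    status = pl022_dma_autoprobe(pl022);
    if (status == -EPROBE_DEFER) {
            /* Retry probing later instead of silently falling back to
             * PIO or platform-provided channels. */
            dev_dbg(dev, "deferring probe to get DMA channel\n");
            goto err_no_irq;
    }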
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e2415be209d5..2b0a8ec3affb 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev)
376 376
377/** 377/**
378 * __spi_register_driver - register a SPI driver 378 * __spi_register_driver - register a SPI driver
379 * @owner: owner module of the driver to register
379 * @sdrv: the driver to register 380 * @sdrv: the driver to register
380 * Context: can sleep 381 * Context: can sleep
381 * 382 *
@@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2130 * Set transfer tx_nbits and rx_nbits as single transfer default 2131 * Set transfer tx_nbits and rx_nbits as single transfer default
2131 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2132 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2132 */ 2133 */
2134 message->frame_length = 0;
2133 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2135 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2134 message->frame_length += xfer->len; 2136 message->frame_length += xfer->len;
2135 if (!xfer->bits_per_word) 2137 if (!xfer->bits_per_word)
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 6d5b38d69578..9d7f0004d2d7 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig"
18source "drivers/staging/iio/trigger/Kconfig" 18source "drivers/staging/iio/trigger/Kconfig"
19 19
20config IIO_DUMMY_EVGEN 20config IIO_DUMMY_EVGEN
21 tristate 21 tristate
22 select IRQ_WORK
22 23
23config IIO_SIMPLE_DUMMY 24config IIO_SIMPLE_DUMMY
24 tristate "An example driver with no hardware requirements" 25 tristate "An example driver with no hardware requirements"
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index d11c54b72186..b51f237cd817 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
76 76
77 if (mask == IIO_CHAN_INFO_RAW) { 77 if (mask == IIO_CHAN_INFO_RAW) {
78 mutex_lock(&indio_dev->mlock); 78 mutex_lock(&indio_dev->mlock);
79 clk_enable(info->clk); 79 clk_prepare_enable(info->clk);
80 /* Measurement setup */ 80 /* Measurement setup */
81 __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, 81 __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
82 LPC32XX_ADC_SELECT(info->adc_base)); 82 LPC32XX_ADC_SELECT(info->adc_base));
@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
84 __raw_writel(AD_PDN_CTRL | AD_STROBE, 84 __raw_writel(AD_PDN_CTRL | AD_STROBE,
85 LPC32XX_ADC_CTRL(info->adc_base)); 85 LPC32XX_ADC_CTRL(info->adc_base));
86 wait_for_completion(&info->completion); /* set by ISR */ 86 wait_for_completion(&info->completion); /* set by ISR */
87 clk_disable(info->clk); 87 clk_disable_unprepare(info->clk);
88 *val = info->value; 88 *val = info->value;
89 mutex_unlock(&indio_dev->mlock); 89 mutex_unlock(&indio_dev->mlock);
90 90
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index f5d741f25ffd..485ab2670918 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -110,7 +110,6 @@ struct libcfs_ioctl_handler {
110#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long) 110#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
111#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long) 111#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
112#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long) 112#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
113#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long)
114/* lnet ioctls */ 113/* lnet ioctls */
115#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) 114#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
116#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) 115#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 07a68594c279..e7c2b26156b9 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
274 } 274 }
275 break; 275 break;
276 276
277 case IOC_LIBCFS_PING_TEST: {
278 extern void (kping_client)(struct libcfs_ioctl_data *);
279 void (*ping)(struct libcfs_ioctl_data *);
280
281 CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
282 data->ioc_count, libcfs_nid2str(data->ioc_nid),
283 libcfs_nid2str(data->ioc_nid));
284 ping = symbol_get(kping_client);
285 if (!ping)
286 CERROR("symbol_get failed\n");
287 else {
288 ping(data);
289 symbol_put(kping_client);
290 }
291 return 0;
292 }
293
294 default: { 277 default: {
295 struct libcfs_ioctl_handler *hand; 278 struct libcfs_ioctl_handler *hand;
296 279
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index e10c6ffa698a..9568bdb6319b 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -13,12 +13,8 @@
13#include "wilc_wlan.h" 13#include "wilc_wlan.h"
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/etherdevice.h>
17#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ 16#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \
18 BEACON_INTERVAL_LEN + CAP_INFO_LEN) 17 BEACON_INTERVAL_LEN + CAP_INFO_LEN)
19#define ADDR1 4
20#define ADDR2 10
21#define ADDR3 16
22 18
23/* Basic Frame Type Codes (2-bit) */ 19/* Basic Frame Type Codes (2-bit) */
24enum basic_frame_type { 20enum basic_frame_type {
@@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header)
175 return ((header[1] & 0x02) >> 1); 171 return ((header[1] & 0x02) >> 1);
176} 172}
177 173
 174/* This function extracts the MAC address in the 'address1' field of the */
 175/* MAC header and copies it into the allocated 'addr' buffer. */
176static inline void get_address1(u8 *pu8msa, u8 *addr)
177{
178 memcpy(addr, pu8msa + 4, 6);
179}
180
 181/* This function extracts the MAC address in the 'address2' field of the */
 182/* MAC header and copies it into the allocated 'addr' buffer. */
183static inline void get_address2(u8 *pu8msa, u8 *addr)
184{
185 memcpy(addr, pu8msa + 10, 6);
186}
187
 188/* This function extracts the MAC address in the 'address3' field of the */
 189/* MAC header and copies it into the allocated 'addr' buffer. */
190static inline void get_address3(u8 *pu8msa, u8 *addr)
191{
192 memcpy(addr, pu8msa + 16, 6);
193}
194
178/* This function extracts the BSSID from the incoming WLAN packet based on */ 195/* This function extracts the BSSID from the incoming WLAN packet based on */
179/* the 'from ds' bit, and updates the MAC Address in the allocated 'data' */ 196/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */
180/* variable. */ 197/* variable. */
181static inline void get_BSSID(u8 *data, u8 *bssid) 198static inline void get_BSSID(u8 *data, u8 *bssid)
182{ 199{
183 if (get_from_ds(data) == 1) 200 if (get_from_ds(data) == 1)
184 /* 201 get_address2(data, bssid);
185 * Extract the MAC Address in 'address2' field of the MAC
186 * header and update the MAC Address in the allocated 'data'
187 * variable.
188 */
189 ether_addr_copy(data, bssid + ADDR2);
190 else if (get_to_ds(data) == 1) 202 else if (get_to_ds(data) == 1)
191 /* 203 get_address1(data, bssid);
192 * Extract the MAC Address in 'address1' field of the MAC
193 * header and update the MAC Address in the allocated 'data'
194 * variable.
195 */
196 ether_addr_copy(data, bssid + ADDR1);
197 else 204 else
198 /* 205 get_address3(data, bssid);
199 * Extract the MAC Address in 'address3' field of the MAC
200 * header and update the MAC Address in the allocated 'data'
201 * variable.
202 */
203 ether_addr_copy(data, bssid + ADDR3);
204} 206}
205 207
206/* This function extracts the SSID from a beacon/probe response frame */ 208/* This function extracts the SSID from a beacon/probe response frame */
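Besides dropping the ADDR* macros, the wilc1000 hunk fixes a copy in the wrong direction: ether_addr_copy(data, bssid + ADDR2) wrote into the frame instead of into bssid. A userspace model of the corrected extraction; the three MAC addresses sit at fixed offsets 4, 10 and 16 in the 802.11 header, and the frame bytes are fabricated for the demo:

    #include <stdio.h>
    #include <string.h>

    static void get_address(const unsigned char *msa, int offset,
                            unsigned char *addr)
    {
            memcpy(addr, msa + offset, 6);  /* header -> caller buffer */
    }

    static void get_bssid(const unsigned char *data, unsigned char *bssid)
    {
            /* to/from-DS live in the second frame-control byte. */
            int from_ds = (data[1] & 0x02) >> 1;
            int to_ds = data[1] & 0x01;

            if (from_ds)
                    get_address(data, 10, bssid);   /* address2 */
            else if (to_ds)
                    get_address(data, 4, bssid);    /* address1 */
            else
                    get_address(data, 16, bssid);   /* address3 */
    }

    int main(void)
    {
            unsigned char frame[24] = { 0x80, 0x00 };  /* beacon, DS bits 0 */
            unsigned char bssid[6];

            memset(frame + 16, 0xab, 6);    /* plant address3 */
            get_bssid(frame, bssid);
            printf("bssid[0]=%02x\n", bssid[0]);  /* ab: address3 chosen */
            return 0;
    }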
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 342a07c58d89..72204fbf2bb1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4074,6 +4074,17 @@ reject:
4074 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 4074 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4075} 4075}
4076 4076
4077static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
4078{
4079 bool ret;
4080
4081 spin_lock_bh(&conn->state_lock);
4082 ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4083 spin_unlock_bh(&conn->state_lock);
4084
4085 return ret;
4086}
4087
4077int iscsi_target_rx_thread(void *arg) 4088int iscsi_target_rx_thread(void *arg)
4078{ 4089{
4079 int ret, rc; 4090 int ret, rc;
@@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg)
4091 * incoming iscsi/tcp socket I/O, and/or failing the connection. 4102 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4092 */ 4103 */
4093 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4104 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4094 if (rc < 0) 4105 if (rc < 0 || iscsi_target_check_conn_state(conn))
4095 return 0; 4106 return 0;
4096 4107
4097 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4108 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 5c964c09c89f..9fc9117d0f22 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -388,6 +388,7 @@ err:
388 if (login->login_complete) { 388 if (login->login_complete) {
389 if (conn->rx_thread && conn->rx_thread_active) { 389 if (conn->rx_thread && conn->rx_thread_active) {
390 send_sig(SIGINT, conn->rx_thread, 1); 390 send_sig(SIGINT, conn->rx_thread, 1);
391 complete(&conn->rx_login_comp);
391 kthread_stop(conn->rx_thread); 392 kthread_stop(conn->rx_thread);
392 } 393 }
393 if (conn->tx_thread && conn->tx_thread_active) { 394 if (conn->tx_thread && conn->tx_thread_active) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 51d1734d5390..2cbea2af7cd0 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
208 if (!pl) { 208 if (!pl) {
209 pr_err("Unable to allocate memory for" 209 pr_err("Unable to allocate memory for"
210 " struct iscsi_param_list.\n"); 210 " struct iscsi_param_list.\n");
211 return -1 ; 211 return -ENOMEM;
212 } 212 }
213 INIT_LIST_HEAD(&pl->param_list); 213 INIT_LIST_HEAD(&pl->param_list);
214 INIT_LIST_HEAD(&pl->extra_response_list); 214 INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@ int iscsi_copy_param_list(
578 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); 578 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
579 if (!param_list) { 579 if (!param_list) {
580 pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); 580 pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
581 return -1; 581 return -ENOMEM;
582 } 582 }
583 INIT_LIST_HEAD(&param_list->param_list); 583 INIT_LIST_HEAD(&param_list->param_list);
584 INIT_LIST_HEAD(&param_list->extra_response_list); 584 INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@ int iscsi_copy_param_list(
629 629
630err_out: 630err_out:
631 iscsi_release_param_list(param_list); 631 iscsi_release_param_list(param_list);
632 return -1; 632 return -ENOMEM;
633} 633}
634 634
635static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) 635static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response(
729 if (!extra_response) { 729 if (!extra_response) {
730 pr_err("Unable to allocate memory for" 730 pr_err("Unable to allocate memory for"
731 " struct iscsi_extra_response.\n"); 731 " struct iscsi_extra_response.\n");
732 return -1; 732 return -ENOMEM;
733 } 733 }
734 INIT_LIST_HEAD(&extra_response->er_list); 734 INIT_LIST_HEAD(&extra_response->er_list);
735 735
@@ -1370,7 +1370,7 @@ int iscsi_decode_text_input(
1370 tmpbuf = kzalloc(length + 1, GFP_KERNEL); 1370 tmpbuf = kzalloc(length + 1, GFP_KERNEL);
1371 if (!tmpbuf) { 1371 if (!tmpbuf) {
1372 pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); 1372 pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
1373 return -1; 1373 return -ENOMEM;
1374 } 1374 }
1375 1375
1376 memcpy(tmpbuf, textbuf, length); 1376 memcpy(tmpbuf, textbuf, length);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 0b4b2a67d9f9..98698d875742 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
371 return 0; 371 return 0;
372} 372}
373 373
374static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) 374static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
375 int *post_ret)
375{ 376{
376 unsigned char *buf, *addr; 377 unsigned char *buf, *addr;
377 struct scatterlist *sg; 378 struct scatterlist *sg;
@@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd)
437 cmd->data_direction); 438 cmd->data_direction);
438} 439}
439 440
440static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) 441static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
442 int *post_ret)
441{ 443{
442 struct se_device *dev = cmd->se_dev; 444 struct se_device *dev = cmd->se_dev;
443 445
@@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
447 * sent to the backend driver. 449 * sent to the backend driver.
448 */ 450 */
449 spin_lock_irq(&cmd->t_state_lock); 451 spin_lock_irq(&cmd->t_state_lock);
450 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) 452 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
451 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 453 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
454 *post_ret = 1;
455 }
452 spin_unlock_irq(&cmd->t_state_lock); 456 spin_unlock_irq(&cmd->t_state_lock);
453 457
454 /* 458 /*
@@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
460 return TCM_NO_SENSE; 464 return TCM_NO_SENSE;
461} 465}
462 466
463static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) 467static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
468 int *post_ret)
464{ 469{
465 struct se_device *dev = cmd->se_dev; 470 struct se_device *dev = cmd->se_dev;
466 struct scatterlist *write_sg = NULL, *sg; 471 struct scatterlist *write_sg = NULL, *sg;
@@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
556 561
557 if (block_size < PAGE_SIZE) { 562 if (block_size < PAGE_SIZE) {
558 sg_set_page(&write_sg[i], m.page, block_size, 563 sg_set_page(&write_sg[i], m.page, block_size,
559 block_size); 564 m.piter.sg->offset + block_size);
560 } else { 565 } else {
561 sg_miter_next(&m); 566 sg_miter_next(&m);
562 sg_set_page(&write_sg[i], m.page, block_size, 567 sg_set_page(&write_sg[i], m.page, block_size,
563 0); 568 m.piter.sg->offset);
564 } 569 }
565 len -= block_size; 570 len -= block_size;
566 i++; 571 i++;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 273c72b2b83d..81a6b3e07687 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
 	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
 		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
 			dev->t10_wwn.model[i] : ' ';
 	str[i] = '\0';
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 5b2820312310..28fb3016370f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -130,6 +130,9 @@ void core_tmr_abort_task(
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
+		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+			continue;
+
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
@@ -139,13 +142,15 @@ void core_tmr_abort_task(
139 " skipping\n", ref_tag); 142 " skipping\n", ref_tag);
140 spin_unlock(&se_cmd->t_state_lock); 143 spin_unlock(&se_cmd->t_state_lock);
141 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 144 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
145
146 target_put_sess_cmd(se_cmd);
147
142 goto out; 148 goto out;
143 } 149 }
144 se_cmd->transport_state |= CMD_T_ABORTED; 150 se_cmd->transport_state |= CMD_T_ABORTED;
145 spin_unlock(&se_cmd->t_state_lock); 151 spin_unlock(&se_cmd->t_state_lock);
146 152
147 list_del_init(&se_cmd->se_cmd_list); 153 list_del_init(&se_cmd->se_cmd_list);
148 kref_get(&se_cmd->cmd_kref);
149 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 154 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
150 155
151 cancel_work_sync(&se_cmd->work); 156 cancel_work_sync(&se_cmd->work);
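
The tmr hunk swaps an unconditional kref_get() taken later for an up-front kref_get_unless_zero(), so a command whose refcount already hit zero is skipped instead of resurrected. A userspace sketch of that acquire-unless-dead rule with C11 atomics (a stand-in, not the kernel kref implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool get_unless_zero(atomic_int *refs)
    {
        int v = atomic_load(refs);

        while (v != 0) {
            /* try v -> v + 1; on failure v is reloaded */
            if (atomic_compare_exchange_weak(refs, &v, v + 1))
                return true;
        }
        return false;   /* refcount already hit zero: do not touch object */
    }

    int main(void)
    {
        atomic_int live, dead;

        atomic_init(&live, 2);
        atomic_init(&dead, 0);
        printf("%d %d\n", get_unless_zero(&live), get_unless_zero(&dead));
        return 0;   /* prints "1 0" */
    }
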
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5bacc7b5ed6d..4fdcee2006d1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0;
+	int ret = 0, post_ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	 */
 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
 	     cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false);
+		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work)
 	 */
 	if (cmd->transport_complete_callback) {
 		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
 
-		rc = cmd->transport_complete_callback(cmd, true);
-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-			    !cmd->data_length)
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
 				goto queue_rsp;
 
 			return;
@@ -2507,23 +2509,24 @@ out:
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-		__releases(&se_cmd->se_sess->sess_cmd_lock)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
 
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock(&se_sess->sess_cmd_lock);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
-	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-			&se_sess->sess_cmd_lock);
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
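
The last transport hunk retires kref_put_spinlock_irqsave(): the final put becomes a plain kref_put(), and target_release_cmd_kref() takes sess_cmd_lock itself with irqsave. A sketch of that shape with a pthread mutex standing in for the spinlock and an atomic count for the kref (illustrative names, not the kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refs;
        pthread_mutex_t *list_lock;
    };

    static void release(struct obj *o)
    {
        pthread_mutex_lock(o->list_lock);   /* lock acquired in release */
        /* ... unlink from the session list here ... */
        pthread_mutex_unlock(o->list_lock);
        free(o);
    }

    static void put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refs, 1) == 1)
            release(o);                     /* last reference dropped */
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refs, 2);
        o->list_lock = &lock;
        put(o);   /* refs 2 -> 1, object stays */
        put(o);   /* refs 1 -> 0, release() runs under the lock */
        printf("done\n");
        return 0;
    }
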
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 937cebf76633..5e6d6cb348fc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		return 0;
 
-	if (!time_after(cmd->deadline, jiffies))
+	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
@@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 
 static const struct target_backend_ops tcmu_ops = {
 	.name			= "user",
-	.inquiry_prod		= "USER",
-	.inquiry_rev		= TCMU_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
 	.attach_hba		= tcmu_attach_hba,
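
The tcmu hunk fixes inverted time_after() arguments: the old test asked whether the deadline was still ahead of jiffies, so a command whose deadline had already passed was never expired. A runnable model of the wraparound-safe comparison (simplified from the macro in include/linux/jiffies.h):

    #include <stdio.h>

    /* true when a is later than b, robust across counter wraparound
     * thanks to the signed subtraction */
    #define time_after(a, b)  ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long jiffies = 1000, deadline = 900;   /* deadline passed */

        /* old check: wrongly reports "not yet expired" and bails out */
        printf("old check skips expiry: %d\n", !time_after(deadline, jiffies));
        /* new check: falls through and expires the command */
        printf("new check skips expiry: %d\n", !time_after(jiffies, deadline));
        return 0;   /* prints 1 then 0 */
    }
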
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c463c89b90ef..8cc4ac64a91c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -382,7 +382,7 @@ endmenu
 
 config QCOM_SPMI_TEMP_ALARM
 	tristate "Qualcomm SPMI PMIC Temperature Alarm"
-	depends on OF && (SPMI || COMPILE_TEST) && IIO
+	depends on OF && SPMI && IIO
 	select REGMAP_SPMI
 	help
 	  This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c8fe3cac2e0e..c5547bd711db 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -55,6 +55,7 @@
 #define TEMPSENSE2_PANIC_VALUE_SHIFT	16
 #define TEMPSENSE2_PANIC_VALUE_MASK	0xfff0000
 
+#define OCOTP_MEM0			0x0480
 #define OCOTP_ANA1			0x04e0
 
 /* The driver supports 1 passive trip point and 1 critical trip point */
@@ -64,12 +65,6 @@ enum imx_thermal_trip {
 	IMX_TRIP_NUM,
 };
 
-/*
- * It defines the temperature in millicelsius for passive trip point
- * that will trigger cooling action when crossed.
- */
-#define IMX_TEMP_PASSIVE		85000
-
 #define IMX_POLLING_DELAY		2000 /* millisecond */
 #define IMX_PASSIVE_DELAY		1000
 
@@ -100,12 +95,14 @@ struct imx_thermal_data {
 	u32 c1, c2; /* See formula in imx_get_sensor_data() */
 	int temp_passive;
 	int temp_critical;
+	int temp_max;
 	int alarm_temp;
 	int last_temp;
 	bool irq_enabled;
 	int irq;
 	struct clk *thermal_clk;
 	const struct thermal_soc_data *socdata;
+	const char *temp_grade;
 };
 
 static void imx_set_panic_temp(struct imx_thermal_data *data,
@@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
 {
 	struct imx_thermal_data *data = tz->devdata;
 
+	/* do not allow changing critical threshold */
 	if (trip == IMX_TRIP_CRITICAL)
 		return -EPERM;
 
-	if (temp < 0 || temp > IMX_TEMP_PASSIVE)
+	/* do not allow passive to be set higher than critical */
+	if (temp < 0 || temp > data->temp_critical)
 		return -EINVAL;
 
 	data->temp_passive = temp;
@@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev)
 	data->c1 = temp64;
 	data->c2 = n1 * data->c1 + 1000 * t1;
 
-	/*
-	 * Set the default passive cooling trip point,
-	 * can be changed from userspace.
-	 */
-	data->temp_passive = IMX_TEMP_PASSIVE;
+	/* use OTP for thermal grade */
+	ret = regmap_read(map, OCOTP_MEM0, &val);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret);
+		return ret;
+	}
+
+	/* The maximum die temp is specified by the Temperature Grade */
+	switch ((val >> 6) & 0x3) {
+	case 0: /* Commercial (0 to 95C) */
+		data->temp_grade = "Commercial";
+		data->temp_max = 95000;
+		break;
+	case 1: /* Extended Commercial (-20 to 105C) */
+		data->temp_grade = "Extended Commercial";
+		data->temp_max = 105000;
+		break;
+	case 2: /* Industrial (-40 to 105C) */
+		data->temp_grade = "Industrial";
+		data->temp_max = 105000;
+		break;
+	case 3: /* Automotive (-40 to 125C) */
+		data->temp_grade = "Automotive";
+		data->temp_max = 125000;
+		break;
+	}
 
 	/*
-	 * The maximum die temperature set to 20 C higher than
-	 * IMX_TEMP_PASSIVE.
+	 * Set the critical trip point at 5C under max
+	 * Set the passive trip point at 10C under max (can change via sysfs)
 	 */
-	data->temp_critical = 1000 * 20 + data->temp_passive;
+	data->temp_critical = data->temp_max - (1000 * 5);
+	data->temp_passive = data->temp_max - (1000 * 10);
 
 	return 0;
 }
@@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
+		 " critical:%dC passive:%dC\n", data->temp_grade,
+		 data->temp_max / 1000, data->temp_critical / 1000,
+		 data->temp_passive / 1000);
+
 	/* Enable measurements at ~ 10 Hz */
 	regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
 	measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
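
The imx_thermal hunks derive the trip points from the OCOTP fuse's temperature-grade field instead of a fixed 85C define. A small sketch of the bits [7:6] decode and the 5C/10C derating (the fuse value below is invented for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x80;   /* pretend OCOTP_MEM0 word, bits [7:6] = 2 */
        static const struct { const char *grade; int max_mc; } map[] = {
            { "Commercial",          95000 },
            { "Extended Commercial", 105000 },
            { "Industrial",          105000 },
            { "Automotive",          125000 },
        };
        unsigned int idx = (val >> 6) & 0x3;

        printf("%s, max %dmC, critical %dmC, passive %dmC\n",
               map[idx].grade, map[idx].max_mc,
               map[idx].max_mc - 5000, map[idx].max_mc - 10000);
        return 0;
    }
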
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 42b7d4253b94..be4eedcb839a 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void)
 
 	np = of_find_node_by_name(NULL, "thermal-zones");
 	if (!np) {
-		pr_err("unable to find thermal zones\n");
+		pr_debug("unable to find thermal zones\n");
 		return;
 	}
 
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index f0fbea386869..1246aa6fcab0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
 /**
  * pid_controller() - PID controller
  * @tz:	thermal zone we are operating in
- * @current_temp:	the current temperature in millicelsius
  * @control_temp:	the target temperature in millicelsius
  * @max_allocatable_power:	maximum allocatable power for this thermal zone
  *
@@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
  * Return: The power budget for the next period.
  */
 static u32 pid_controller(struct thermal_zone_device *tz,
-			  int current_temp,
 			  int control_temp,
 			  u32 max_allocatable_power)
 {
@@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
 				       true);
 	}
 
-	err = control_temp - current_temp;
+	err = control_temp - tz->temperature;
 	err = int_to_frac(err);
 
 	/* Calculate the proportional term */
@@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 }
 
 static int allocate_power(struct thermal_zone_device *tz,
-			  int current_temp,
 			  int control_temp)
 {
 	struct thermal_instance *instance;
@@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz,
 		i++;
 	}
 
-	power_range = pid_controller(tz, current_temp, control_temp,
-				     max_allocatable_power);
+	power_range = pid_controller(tz, control_temp, max_allocatable_power);
 
 	divvy_up_power(weighted_req_power, max_power, num_actors,
 		       total_weighted_req_power, power_range, granted_power,
@@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz,
 	trace_thermal_power_allocator(tz, req_power, total_req_power,
 				      granted_power, total_granted_power,
 				      num_actors, power_range,
-				      max_allocatable_power, current_temp,
-				      control_temp - current_temp);
+				      max_allocatable_power, tz->temperature,
+				      control_temp - tz->temperature);
 
 	kfree(req_power);
 unlock:
@@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
 static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 {
 	int ret;
-	int switch_on_temp, control_temp, current_temp;
+	int switch_on_temp, control_temp;
 	struct power_allocator_params *params = tz->governor_data;
 
 	/*
@@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 	if (trip != params->trip_max_desired_temperature)
 		return 0;
 
-	ret = thermal_zone_get_temp(tz, &current_temp);
-	if (ret) {
-		dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
-		return ret;
-	}
-
 	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
 				     &switch_on_temp);
-	if (!ret && (current_temp < switch_on_temp)) {
+	if (!ret && (tz->temperature < switch_on_temp)) {
 		tz->passive = 0;
 		reset_pid_controller(params);
 		allow_maximum_power(tz);
@@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 		return ret;
 	}
 
-	return allocate_power(tz, current_temp, control_temp);
+	return allocate_power(tz, control_temp);
 }
 
 static struct thermal_governor thermal_gov_power_allocator = {
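
The power_allocator hunks drop the extra thermal_zone_get_temp() call and feed the PID error term from the cached tz->temperature. A minimal sketch of the proportional step in that fixed-point style (FRAC_BITS and K_P are illustrative values, not the governor's real tunables):

    #include <stdio.h>

    #define FRAC_BITS 10
    #define K_P (1 << (FRAC_BITS - 2))   /* 0.25 in fixed point */

    static long frac_to_int(long x) { return x >> FRAC_BITS; }

    int main(void)
    {
        int cached_temp = 72000;    /* mC, cached in the zone struct */
        int control_temp = 75000;   /* mC, desired maximum */
        long err = control_temp - cached_temp;

        /* proportional contribution to the power budget */
        printf("P term: %ld\n", frac_to_int(K_P * err));   /* 0.25 * 3000 = 750 */
        return 0;
    }
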
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 5d4ae7d705e0..13d01edc7a04 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
 /*
  * platform functions
  */
+static int rcar_thermal_remove(struct platform_device *pdev)
+{
+	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	struct rcar_thermal_priv *priv;
+
+	rcar_thermal_for_each_priv(priv, common) {
+		if (rcar_has_irq_support(priv))
+			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
+	}
+
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
 static int rcar_thermal_probe(struct platform_device *pdev)
 {
 	struct rcar_thermal_common *common;
@@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 	if (!common)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, common);
+
 	INIT_LIST_HEAD(&common->head);
 	spin_lock_init(&common->lock);
 	common->dev = dev;
@@ -454,43 +474,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 		rcar_thermal_common_write(common, ENR, enr_bits);
 	}
 
-	platform_set_drvdata(pdev, common);
-
 	dev_info(dev, "%d sensor probed\n", i);
 
 	return 0;
 
 error_unregister:
-	rcar_thermal_for_each_priv(priv, common) {
-		if (rcar_has_irq_support(priv))
-			rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
-	}
-
-	pm_runtime_put(dev);
-	pm_runtime_disable(dev);
+	rcar_thermal_remove(pdev);
 
 	return ret;
 }
 
-static int rcar_thermal_remove(struct platform_device *pdev)
-{
-	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
-	struct device *dev = &pdev->dev;
-	struct rcar_thermal_priv *priv;
-
-	rcar_thermal_for_each_priv(priv, common) {
-		if (rcar_has_irq_support(priv))
-			rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
-	}
-
-	pm_runtime_put(dev);
-	pm_runtime_disable(dev);
-
-	return 0;
-}
-
 static const struct of_device_id rcar_thermal_dt_ids[] = {
 	{ .compatible = "renesas,rcar-thermal", },
 	{},
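
The rcar hunks hoist platform_set_drvdata() ahead of the failure points so the probe error path can simply call rcar_thermal_remove() instead of duplicating the unwind. A small sketch of that probe-reuses-remove ordering with stub names (illustrative, not the driver API):

    #include <stdio.h>

    struct dev { void *drvdata; };

    static int my_remove(struct dev *d)
    {
        printf("unwinding with drvdata=%p\n", d->drvdata);
        return 0;
    }

    static int my_probe(struct dev *d, int fail)
    {
        static int state = 42;

        d->drvdata = &state;   /* set early, before anything can fail */
        if (fail) {
            my_remove(d);      /* single unwind path, safe to reuse */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0 };

        my_probe(&d, 1);
        return 0;
    }
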
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9787e8aa509f..e845841ab036 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1,6 +1,9 @@
 /*
  * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
  *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Caesar Wang <wxt@rock-chips.com>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
@@ -45,17 +48,50 @@ enum tshut_polarity {
 };
 
 /**
- * The system has three Temperature Sensors.  channel 0 is reserved,
- * channel 1 is for CPU, and channel 2 is for GPU.
+ * The system has two Temperature Sensors.
+ * sensor0 is for CPU, and sensor1 is for GPU.
  */
 enum sensor_id {
-	SENSOR_CPU = 1,
+	SENSOR_CPU = 0,
 	SENSOR_GPU,
 };
 
+/**
+ * The conversion table holds the adc value and temperature.
+ * ADC_DECREMENT: the adc value decrements along the table (e.g. v2_code_table)
+ * ADC_INCREMENT: the adc value increments along the table (e.g. v3_code_table)
+ */
+enum adc_sort_mode {
+	ADC_DECREMENT = 0,
+	ADC_INCREMENT,
+};
+
+/**
+ * The max sensors is two in rockchip SoCs.
+ * Two sensors: CPU and GPU sensor.
+ */
+#define SOC_MAX_SENSORS	2
+
+struct chip_tsadc_table {
+	const struct tsadc_table *id;
+
+	/* the array table size */
+	unsigned int length;
+
+	/* the analog data mask */
+	u32 data_mask;
+
+	/* the sort mode: adc values increment or decrement in the table */
+	enum adc_sort_mode mode;
+};
+
 struct rockchip_tsadc_chip {
+	/* The sensor id of chip correspond to the ADC channel */
+	int chn_id[SOC_MAX_SENSORS];
+	int chn_num;
+
 	/* The hardware-controlled tshut property */
-	long tshut_temp;
+	int tshut_temp;
 	enum tshut_mode tshut_mode;
 	enum tshut_polarity tshut_polarity;
 
@@ -65,37 +101,40 @@ struct rockchip_tsadc_chip {
 	void (*control)(void __iomem *reg, bool on);
 
 	/* Per-sensor methods */
-	int (*get_temp)(int chn, void __iomem *reg, int *temp);
-	void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
+	int (*get_temp)(struct chip_tsadc_table table,
+			int chn, void __iomem *reg, int *temp);
+	void (*set_tshut_temp)(struct chip_tsadc_table table,
+			       int chn, void __iomem *reg, int temp);
 	void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+
+	/* Per-table methods */
+	struct chip_tsadc_table table;
 };
 
 struct rockchip_thermal_sensor {
 	struct rockchip_thermal_data *thermal;
 	struct thermal_zone_device *tzd;
-	enum sensor_id id;
+	int id;
 };
 
-#define NUM_SENSORS	2 /* Ignore unused sensor 0 */
-
 struct rockchip_thermal_data {
 	const struct rockchip_tsadc_chip *chip;
 	struct platform_device *pdev;
 	struct reset_control *reset;
 
-	struct rockchip_thermal_sensor sensors[NUM_SENSORS];
+	struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS];
 
 	struct clk *clk;
 	struct clk *pclk;
 
 	void __iomem *regs;
 
-	long tshut_temp;
+	int tshut_temp;
 	enum tshut_mode tshut_mode;
 	enum tshut_polarity tshut_polarity;
 };
 
-/* TSADC V2 Sensor info define: */
+/* TSADC Sensor info define: */
 #define TSADCV2_AUTO_CON			0x04
 #define TSADCV2_INT_EN				0x08
 #define TSADCV2_INT_PD				0x0c
@@ -117,6 +156,8 @@ struct rockchip_thermal_data {
 #define TSADCV2_INT_PD_CLEAR_MASK		~BIT(8)
 
 #define TSADCV2_DATA_MASK			0xfff
+#define TSADCV3_DATA_MASK			0x3ff
+
 #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT	4
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT	4
 #define TSADCV2_AUTO_PERIOD_TIME		250 /* msec */
@@ -124,7 +165,7 @@ struct rockchip_thermal_data {
 
 struct tsadc_table {
 	u32 code;
-	long temp;
+	int temp;
 };
 
 static const struct tsadc_table v2_code_table[] = {
@@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = {
 	{3421, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(long temp)
+static const struct tsadc_table v3_code_table[] = {
+	{0, -40000},
+	{106, -40000},
+	{108, -35000},
+	{110, -30000},
+	{112, -25000},
+	{114, -20000},
+	{116, -15000},
+	{118, -10000},
+	{120, -5000},
+	{122, 0},
+	{124, 5000},
+	{126, 10000},
+	{128, 15000},
+	{130, 20000},
+	{132, 25000},
+	{134, 30000},
+	{136, 35000},
+	{138, 40000},
+	{140, 45000},
+	{142, 50000},
+	{144, 55000},
+	{146, 60000},
+	{148, 65000},
+	{150, 70000},
+	{152, 75000},
+	{154, 80000},
+	{156, 85000},
+	{158, 90000},
+	{160, 95000},
+	{162, 100000},
+	{163, 105000},
+	{165, 110000},
+	{167, 115000},
+	{169, 120000},
+	{171, 125000},
+	{TSADCV3_DATA_MASK, 125000},
+};
+
+static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+				   int temp)
 {
 	int high, low, mid;
 
 	low = 0;
-	high = ARRAY_SIZE(v2_code_table) - 1;
+	high = table.length - 1;
 	mid = (high + low) / 2;
 
-	if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp)
+	if (temp < table.id[low].temp || temp > table.id[high].temp)
 		return 0;
 
 	while (low <= high) {
-		if (temp == v2_code_table[mid].temp)
-			return v2_code_table[mid].code;
-		else if (temp < v2_code_table[mid].temp)
+		if (temp == table.id[mid].temp)
+			return table.id[mid].code;
+		else if (temp < table.id[mid].temp)
 			high = mid - 1;
 		else
 			low = mid + 1;
@@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
 	return 0;
 }
 
-static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
+static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
+				   int *temp)
 {
 	unsigned int low = 1;
-	unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+	unsigned int high = table.length - 1;
 	unsigned int mid = (low + high) / 2;
 	unsigned int num;
 	unsigned long denom;
 
-	BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2);
+	WARN_ON(table.length < 2);
 
-	code &= TSADCV2_DATA_MASK;
-	if (code < v2_code_table[high].code)
-		return -EAGAIN;	/* Incorrect reading */
-
-	while (low <= high) {
-		if (code >= v2_code_table[mid].code &&
-		    code < v2_code_table[mid - 1].code)
-			break;
-		else if (code < v2_code_table[mid].code)
-			low = mid + 1;
-		else
-			high = mid - 1;
-		mid = (low + high) / 2;
+	switch (table.mode) {
+	case ADC_DECREMENT:
+		code &= table.data_mask;
+		if (code < table.id[high].code)
+			return -EAGAIN;		/* Incorrect reading */
+
+		while (low <= high) {
+			if (code >= table.id[mid].code &&
+			    code < table.id[mid - 1].code)
+				break;
+			else if (code < table.id[mid].code)
+				low = mid + 1;
+			else
+				high = mid - 1;
+
+			mid = (low + high) / 2;
+		}
+		break;
+	case ADC_INCREMENT:
+		code &= table.data_mask;
+		if (code < table.id[low].code)
+			return -EAGAIN;		/* Incorrect reading */
+
+		while (low <= high) {
+			if (code >= table.id[mid - 1].code &&
+			    code < table.id[mid].code)
+				break;
+			else if (code > table.id[mid].code)
+				low = mid + 1;
+			else
+				high = mid - 1;
+
+			mid = (low + high) / 2;
+		}
+		break;
+	default:
+		pr_err("Invalid conversion table\n");
 	}
 
 	/*
@@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
 	 * temperature between 2 table entries is linear and interpolate
 	 * to produce less granular result.
 	 */
-	num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
-	num *= v2_code_table[mid - 1].code - code;
-	denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
-	*temp = v2_code_table[mid - 1].temp + (num / denom);
+	num = table.id[mid].temp - v2_code_table[mid - 1].temp;
+	num *= abs(table.id[mid - 1].code - code);
+	denom = abs(table.id[mid - 1].code - table.id[mid].code);
+	*temp = table.id[mid - 1].temp + (num / denom);
 
 	return 0;
 }
 
 /**
- * rk_tsadcv2_initialize - initialize TASDC Controller
- * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between
- * every two accessing of TSADC in normal operation.
- * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between
- * every two accessing of TSADC after the temperature is higher
- * than COM_SHUT or COM_INT.
- * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE,
- * if the temperature is higher than COMP_INT or COMP_SHUT for
- * "debounce" times, TSADC controller will generate interrupt or TSHUT.
+ * rk_tsadcv2_initialize - initialize TASDC Controller.
+ *
+ * (1) Set TSADC_V2_AUTO_PERIOD:
+ *     Configure the interleave between every two accessing of
+ *     TSADC in normal operation.
+ *
+ * (2) Set TSADCV2_AUTO_PERIOD_HT:
+ *     Configure the interleave between every two accessing of
+ *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
+ *     If the temperature is higher than COMP_INT or COMP_SHUT for
+ *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
  */
 static void rk_tsadcv2_initialize(void __iomem *regs,
 				  enum tshut_polarity tshut_polarity)
@@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
 	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
+static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+			       int chn, void __iomem *regs, int *temp)
 {
 	u32 val;
 
 	val = readl_relaxed(regs + TSADCV2_DATA(chn));
 
-	return rk_tsadcv2_code_to_temp(val, temp);
+	return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp)
+static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
+				  int chn, void __iomem *regs, int temp)
 {
 	u32 tshut_value, val;
 
-	tshut_value = rk_tsadcv2_temp_to_code(temp);
+	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
 	writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
 	/* TSHUT will be valid */
@@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
 }
 
 static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
+	.chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */
+	.chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */
+	.chn_num = 2, /* two channels for tsadc */
+
 	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
 	.tshut_temp = 95000,
@@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
 	.get_temp = rk_tsadcv2_get_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+	.table = {
+		.id = v2_code_table,
+		.length = ARRAY_SIZE(v2_code_table),
+		.data_mask = TSADCV2_DATA_MASK,
+		.mode = ADC_DECREMENT,
+	},
+};
+
+static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
+	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+	.chn_num = 2, /* two channels for tsadc */
+
+	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+	.tshut_temp = 95000,
+
+	.initialize = rk_tsadcv2_initialize,
+	.irq_ack = rk_tsadcv2_irq_ack,
+	.control = rk_tsadcv2_control,
+	.get_temp = rk_tsadcv2_get_temp,
+	.set_tshut_temp = rk_tsadcv2_tshut_temp,
+	.set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+	.table = {
+		.id = v3_code_table,
+		.length = ARRAY_SIZE(v3_code_table),
+		.data_mask = TSADCV3_DATA_MASK,
+		.mode = ADC_INCREMENT,
+	},
 };
 
 static const struct of_device_id of_rockchip_thermal_match[] = {
@@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
 		.compatible = "rockchip,rk3288-tsadc",
 		.data = (void *)&rk3288_tsadc_data,
 	},
+	{
+		.compatible = "rockchip,rk3368-tsadc",
+		.data = (void *)&rk3368_tsadc_data,
+	},
 	{ /* end */ },
 };
 MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
@@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
 
 	thermal->chip->irq_ack(thermal->regs);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		thermal_zone_device_update(thermal->sensors[i].tzd);
 
 	return IRQ_HANDLED;
@@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
 	const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
 	int retval;
 
-	retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
+	retval = tsadc->get_temp(tsadc->table,
+				 sensor->id, thermal->regs, out_temp);
 	dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
 		sensor->id, *out_temp, retval);
 
@@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev,
 
 	if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
 		dev_warn(dev,
-			 "Missing tshut temp property, using default %ld\n",
+			 "Missing tshut temp property, using default %d\n",
 			 thermal->chip->tshut_temp);
 		thermal->tshut_temp = thermal->chip->tshut_temp;
 	} else {
@@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev,
 	}
 
 	if (thermal->tshut_temp > INT_MAX) {
-		dev_err(dev, "Invalid tshut temperature specified: %ld\n",
+		dev_err(dev, "Invalid tshut temperature specified: %d\n",
 			thermal->tshut_temp);
 		return -ERANGE;
 	}
@@ -442,13 +594,14 @@ static int
 rockchip_thermal_register_sensor(struct platform_device *pdev,
 				 struct rockchip_thermal_data *thermal,
 				 struct rockchip_thermal_sensor *sensor,
-				 enum sensor_id id)
+				 int id)
 {
 	const struct rockchip_tsadc_chip *tsadc = thermal->chip;
 	int error;
 
 	tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
-	tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp);
+	tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+			      thermal->tshut_temp);
 
 	sensor->thermal = thermal;
 	sensor->id = id;
@@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 	const struct of_device_id *match;
 	struct resource *res;
 	int irq;
-	int i;
+	int i, j;
 	int error;
 
 	match = of_match_node(of_rockchip_thermal_match, np);
@@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 
 	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
 
-	error = rockchip_thermal_register_sensor(pdev, thermal,
-						 &thermal->sensors[0],
-						 SENSOR_CPU);
-	if (error) {
-		dev_err(&pdev->dev,
-			"failed to register CPU thermal sensor: %d\n", error);
-		goto err_disable_pclk;
-	}
-
-	error = rockchip_thermal_register_sensor(pdev, thermal,
-						 &thermal->sensors[1],
-						 SENSOR_GPU);
-	if (error) {
-		dev_err(&pdev->dev,
-			"failed to register GPU thermal sensor: %d\n", error);
-		goto err_unregister_cpu_sensor;
+	for (i = 0; i < thermal->chip->chn_num; i++) {
+		error = rockchip_thermal_register_sensor(pdev, thermal,
+						&thermal->sensors[i],
+						thermal->chip->chn_id[i]);
+		if (error) {
+			dev_err(&pdev->dev,
+				"failed to register sensor[%d] : error = %d\n",
+				i, error);
+			for (j = 0; j < i; j++)
+				thermal_zone_of_sensor_unregister(&pdev->dev,
+						thermal->sensors[j].tzd);
+			goto err_disable_pclk;
+		}
 	}
 
 	error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
@@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 	if (error) {
 		dev_err(&pdev->dev,
 			"failed to request tsadc irq: %d\n", error);
-		goto err_unregister_gpu_sensor;
+		goto err_unregister_sensor;
 	}
 
 	thermal->chip->control(thermal->regs, true);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
 
 	platform_set_drvdata(pdev, thermal);
 
 	return 0;
 
-err_unregister_gpu_sensor:
-	thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd);
-err_unregister_cpu_sensor:
-	thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd);
+err_unregister_sensor:
+	while (i--)
+		thermal_zone_of_sensor_unregister(&pdev->dev,
+						  thermal->sensors[i].tzd);
+
 err_disable_pclk:
 	clk_disable_unprepare(thermal->pclk);
 err_disable_clk:
@@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
 	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+	for (i = 0; i < thermal->chip->chn_num; i++) {
 		struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
 
 		rockchip_thermal_toggle_sensor(sensor, false);
@@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
 	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);
 
 	thermal->chip->control(thermal->regs, false);
@@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
 
 	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
-		enum sensor_id id = thermal->sensors[i].id;
+	for (i = 0; i < thermal->chip->chn_num; i++) {
+		int id = thermal->sensors[i].id;
 
 		thermal->chip->set_tshut_mode(id, thermal->regs,
 					      thermal->tshut_mode);
-		thermal->chip->set_tshut_temp(id, thermal->regs,
+		thermal->chip->set_tshut_temp(thermal->chip->table,
+					      id, thermal->regs,
 					      thermal->tshut_temp);
 	}
 
 	thermal->chip->control(thermal->regs, true);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
 
 	pinctrl_pm_select_default_state(dev);
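
The rockchip hunks parameterize the lookup over a per-chip table and add an ascending (ADC_INCREMENT) search for the rk3368. A runnable sketch of that bracketing search plus linear interpolation, using a few entries trimmed from v3_code_table:

    #include <stdio.h>

    struct entry { unsigned int code; int temp; };

    static const struct entry tbl[] = {
        { 122, 0 }, { 124, 5000 }, { 126, 10000 }, { 128, 15000 },
    };

    static int code_to_temp(unsigned int code, int *temp)
    {
        unsigned int low = 1, high = sizeof(tbl) / sizeof(tbl[0]) - 1;
        unsigned int mid = (low + high) / 2;

        if (code < tbl[0].code)
            return -1;   /* below the table: bad reading */

        /* find mid such that tbl[mid-1].code <= code < tbl[mid].code */
        while (low <= high) {
            if (code >= tbl[mid - 1].code && code < tbl[mid].code)
                break;
            else if (code > tbl[mid].code)
                low = mid + 1;
            else
                high = mid - 1;
            mid = (low + high) / 2;
        }

        /* linear interpolation between the two bracketing entries */
        *temp = tbl[mid - 1].temp +
                (tbl[mid].temp - tbl[mid - 1].temp) *
                (int)(code - tbl[mid - 1].code) /
                (int)(tbl[mid].code - tbl[mid - 1].code);
        return 0;
    }

    int main(void)
    {
        int t;

        if (!code_to_temp(125, &t))
            printf("%d mC\n", t);   /* 7500: halfway between 5000 and 10000 */
        return 0;
    }
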
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 13844261cd5f..ed776149261e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
 {
 	struct n_tty_data *ldata = tty->disc_data;
 
-	tty_audit_add_data(tty, to, n, ldata->icanon);
+	tty_audit_add_data(tty, from, n, ldata->icanon);
 	return copy_to_user(to, from, n);
 }
 
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index c0533a57ec53..910bfee5a88b 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port)
 	spin_unlock_irqrestore(&up->port.lock, flags);
 	return 1;
 }
+EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index e6f5e12a2d83..6412f1455beb 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -373,6 +373,7 @@ config SERIAL_8250_MID
 	depends on SERIAL_8250 && PCI
 	select HSU_DMA if SERIAL_8250_DMA
 	select HSU_DMA_PCI if X86_INTEL_MID
+	select RATIONAL
 	help
 	  Selecting this option will enable handling of the extra features
 	  present on the UART found on Intel Medfield SOC and various other
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 1aec4404062d..f38beb28e7ae 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART
 	tristate "Freescale lpuart serial port support"
 	depends on HAS_DMA
 	select SERIAL_CORE
-	select SERIAL_EARLYCON
 	help
 	  Support for the on-chip lpuart on some Freescale SOCs.
 
@@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE
 	bool "Console on Freescale lpuart serial port"
 	depends on SERIAL_FSL_LPUART=y
 	select SERIAL_CORE_CONSOLE
+	select SERIAL_EARLYCON
 	help
 	  If you have enabled the lpuart serial port on the Freescale SoCs,
 	  you can make it the console by answering Y to this option.
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 681e0f3d5e0e..a1c0a89d9c7f 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port)
 
 	/* register irq and enable rx interrupts */
 	ret = request_irq(port->irq, bcm_uart_interrupt, 0,
-			  bcm_uart_type(port), port);
+			  dev_name(port->dev), port);
 	if (ret)
 		return ret;
 	bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index 6813e316e9ff..2f80bc7e44fb 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev)
 	up->regi_ser = of_iomap(np, 0);
 	up->port.dev = &pdev->dev;
 
-	up->gpios = mctrl_gpio_init(&pdev->dev, 0);
+	up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0);
 	if (IS_ERR(up->gpios))
 		return PTR_ERR(up->gpios);
 
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 90ca082935f6..3d245cd3d8e6 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
  *
  *	Audit @data of @size from @tty, if necessary.
  */
-void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+void tty_audit_add_data(struct tty_struct *tty, const void *data,
 			size_t size, unsigned icanon)
 {
 	struct tty_audit_buf *buf;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0c41dbcb90b8..bcc8e1e8bb72 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
 	int was_stopped = tty->stopped;
 
 	if (tty->ops->send_xchar) {
+		down_read(&tty->termios_rwsem);
 		tty->ops->send_xchar(tty, ch);
+		up_read(&tty->termios_rwsem);
 		return 0;
 	}
 
 	if (tty_write_lock(tty, 0) < 0)
 		return -ERESTARTSYS;
 
+	down_read(&tty->termios_rwsem);
 	if (was_stopped)
 		start_tty(tty);
 	tty->ops->write(tty, &ch, 1);
 	if (was_stopped)
 		stop_tty(tty);
+	up_read(&tty->termios_rwsem);
 	tty_write_unlock(tty);
 	return 0;
 }
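
The tty_io.c and tty_ioctl.c hunks move the termios_rwsem read lock from the TCIOFF/TCION callers down into tty_send_xchar() itself, so the lock is taken only around the actual send. A sketch of that lock push-down with a pthread rwlock standing in for termios_rwsem (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t termios_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    static int send_xchar(char ch)
    {
        pthread_rwlock_rdlock(&termios_rwsem);   /* taken here, not by caller */
        printf("sent %c under read lock\n", ch);
        pthread_rwlock_unlock(&termios_rwsem);
        return 0;
    }

    int main(void)
    {
        return send_xchar('S');   /* caller holds no termios lock */
    }
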
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9c5aebfe7053..1445dd39aa62 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
 		spin_unlock_irq(&tty->flow_lock);
 		break;
 	case TCIOFF:
-		down_read(&tty->termios_rwsem);
 		if (STOP_CHAR(tty) != __DISABLED_CHAR)
 			retval = tty_send_xchar(tty, STOP_CHAR(tty));
-		up_read(&tty->termios_rwsem);
 		break;
 	case TCION:
-		down_read(&tty->termios_rwsem);
 		if (START_CHAR(tty) != __DISABLED_CHAR)
 			retval = tty_send_xchar(tty, START_CHAR(tty));
-		up_read(&tty->termios_rwsem);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5af8f1874c1a..629e3c865072 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
 	/* Restart the work queue in case no characters kick it off. Safe if
 	   already running */
-	schedule_work(&tty->port->buf.work);
+	tty_buffer_restart_work(tty->port);
 
 	tty_unlock(tty);
 	return retval;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 6ccbf60cdd5c..5a048b7b92e8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -84,6 +84,12 @@ struct ci_hdrc_imx_data {
 	struct imx_usbmisc_data *usbmisc_data;
 	bool supports_runtime_pm;
 	bool in_lpm;
+	/* SoC before i.mx6 (except imx23/imx28) needs three clks */
+	bool need_three_clks;
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
+	struct clk *clk_per;
+	/* --------------------------------- */
 };
 
 /* Common functions shared by usbmisc drivers */
@@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
 }
 
 /* End of common functions shared by usbmisc drivers*/
+static int imx_get_clks(struct device *dev)
+{
+	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+	int ret = 0;
+
+	data->clk_ipg = devm_clk_get(dev, "ipg");
+	if (IS_ERR(data->clk_ipg)) {
+		/* If the platform only needs one clock */
+		data->clk = devm_clk_get(dev, NULL);
+		if (IS_ERR(data->clk)) {
+			ret = PTR_ERR(data->clk);
+			dev_err(dev,
+				"Failed to get clks, err=%ld,%ld\n",
+				PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
+			return ret;
+		}
+		return ret;
+	}
+
+	data->clk_ahb = devm_clk_get(dev, "ahb");
+	if (IS_ERR(data->clk_ahb)) {
+		ret = PTR_ERR(data->clk_ahb);
+		dev_err(dev,
+			"Failed to get ahb clock, err=%d\n", ret);
+		return ret;
+	}
+
+	data->clk_per = devm_clk_get(dev, "per");
+	if (IS_ERR(data->clk_per)) {
+		ret = PTR_ERR(data->clk_per);
+		dev_err(dev,
+			"Failed to get per clock, err=%d\n", ret);
+		return ret;
+	}
+
+	data->need_three_clks = true;
+	return ret;
+}
+
+static int imx_prepare_enable_clks(struct device *dev)
+{
+	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (data->need_three_clks) {
+		ret = clk_prepare_enable(data->clk_ipg);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable ipg clk, err=%d\n",
+				ret);
+			return ret;
+		}
+
+		ret = clk_prepare_enable(data->clk_ahb);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable ahb clk, err=%d\n",
+				ret);
+			clk_disable_unprepare(data->clk_ipg);
+			return ret;
+		}
+
+		ret = clk_prepare_enable(data->clk_per);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable per clk, err=%d\n",
+				ret);
+			clk_disable_unprepare(data->clk_ahb);
+			clk_disable_unprepare(data->clk_ipg);
+			return ret;
+		}
+	} else {
+		ret = clk_prepare_enable(data->clk);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable clk, err=%d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static void imx_disable_unprepare_clks(struct device *dev)
+{
+	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+
+	if (data->need_three_clks) {
+		clk_disable_unprepare(data->clk_per);
+		clk_disable_unprepare(data->clk_ahb);
+		clk_disable_unprepare(data->clk_ipg);
+	} else {
+		clk_disable_unprepare(data->clk);
+	}
+}
 
 static int ci_hdrc_imx_probe(struct platform_device *pdev)
 {
@@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 		.flags		= CI_HDRC_SET_NON_ZERO_TTHA,
 	};
 	int ret;
-	const struct of_device_id *of_id =
-			of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
-	const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data;
+	const struct of_device_id *of_id;
+	const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
+
+	of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
+
+	imx_platform_flag = of_id->data;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, data);
 	data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
 	if (IS_ERR(data->usbmisc_data))
 		return PTR_ERR(data->usbmisc_data);
 
-	data->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(data->clk)) {
-		dev_err(&pdev->dev,
-			"Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
-		return PTR_ERR(data->clk);
-	}
+	ret = imx_get_clks(&pdev->dev);
+	if (ret)
+		return ret;
 
-	ret = clk_prepare_enable(data->clk);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"Failed to prepare or enable clock, err=%d\n", ret);
+	ret = imx_prepare_enable_clks(&pdev->dev);
+	if (ret)
 		return ret;
-	}
 
 	data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
 	if (IS_ERR(data->phy)) {
@@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
212 goto disable_device; 314 goto disable_device;
213 } 315 }
214 316
215 platform_set_drvdata(pdev, data);
216
217 if (data->supports_runtime_pm) { 317 if (data->supports_runtime_pm) {
218 pm_runtime_set_active(&pdev->dev); 318 pm_runtime_set_active(&pdev->dev);
219 pm_runtime_enable(&pdev->dev); 319 pm_runtime_enable(&pdev->dev);
@@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
226disable_device: 326disable_device:
227 ci_hdrc_remove_device(data->ci_pdev); 327 ci_hdrc_remove_device(data->ci_pdev);
228err_clk: 328err_clk:
229 clk_disable_unprepare(data->clk); 329 imx_disable_unprepare_clks(&pdev->dev);
230 return ret; 330 return ret;
231} 331}
232 332
@@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
240 pm_runtime_put_noidle(&pdev->dev); 340 pm_runtime_put_noidle(&pdev->dev);
241 } 341 }
242 ci_hdrc_remove_device(data->ci_pdev); 342 ci_hdrc_remove_device(data->ci_pdev);
243 clk_disable_unprepare(data->clk); 343 imx_disable_unprepare_clks(&pdev->dev);
244 344
245 return 0; 345 return 0;
246} 346}
@@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev)
252 352
253 dev_dbg(dev, "at %s\n", __func__); 353 dev_dbg(dev, "at %s\n", __func__);
254 354
255 clk_disable_unprepare(data->clk); 355 imx_disable_unprepare_clks(dev);
256 data->in_lpm = true; 356 data->in_lpm = true;
257 357
258 return 0; 358 return 0;
@@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev)
270 return 0; 370 return 0;
271 } 371 }
272 372
273 ret = clk_prepare_enable(data->clk); 373 ret = imx_prepare_enable_clks(dev);
274 if (ret) 374 if (ret)
275 return ret; 375 return ret;
276 376
@@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev)
285 return 0; 385 return 0;
286 386
287clk_disable: 387clk_disable:
288 clk_disable_unprepare(data->clk); 388 imx_disable_unprepare_clks(dev);
289 return ret; 389 return ret;
290} 390}
291 391
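The ci_hdrc_imx refactor above funnels every clock operation through three helpers so that the probe, remove, and PM paths stay in sync. The core lookup shape — try the named clocks first, fall back to a single unnamed clock for older bindings — generalizes to other platform drivers. Below is a minimal sketch of that pattern only; the struct and clock names are hypothetical, not the driver's real ones.

/*
 * Sketch: named-clock lookup with a single-clock fallback, as in
 * imx_get_clks() above. The struct and names are hypothetical.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct example_clks {
	struct clk *clk;	/* single-clock platforms */
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	bool need_three_clks;
};

static int example_get_clks(struct device *dev, struct example_clks *d)
{
	d->clk_ipg = devm_clk_get(dev, "ipg");
	if (IS_ERR(d->clk_ipg)) {
		/* Older bindings expose only one, unnamed, clock. */
		d->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(d->clk))
			return PTR_ERR(d->clk);
		d->need_three_clks = false;
		return 0;
	}

	d->clk_ahb = devm_clk_get(dev, "ahb");
	if (IS_ERR(d->clk_ahb))
		return PTR_ERR(d->clk_ahb);

	d->need_three_clks = true;
	return 0;
}

Keeping the bool next to the clk handles lets the enable/disable paths branch once instead of re-probing the device tree on every transition.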
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 080b7be3daf0..58c8485a0715 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
322 return -EINVAL; 322 return -EINVAL;
323 323
324 pm_runtime_get_sync(ci->dev); 324 pm_runtime_get_sync(ci->dev);
325 disable_irq(ci->irq);
325 ci_role_stop(ci); 326 ci_role_stop(ci);
326 ret = ci_role_start(ci, role); 327 ret = ci_role_start(ci, role);
328 enable_irq(ci->irq);
327 pm_runtime_put_sync(ci->dev); 329 pm_runtime_put_sync(ci->dev);
328 330
329 return ret ? ret : count; 331 return ret ? ret : count;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8223fe73ea85..391a1225b0ba 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
1751 return retval; 1751 return retval;
1752} 1752}
1753 1753
1754static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1755{
1756 if (!ci_otg_is_fsm_mode(ci))
1757 return;
1758
1759 mutex_lock(&ci->fsm.lock);
1760 if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1761 ci->fsm.a_bidl_adis_tmout = 1;
1762 ci_hdrc_otg_fsm_start(ci);
1763 } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1764 ci->fsm.protocol = PROTO_UNDEF;
1765 ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1766 }
1767 mutex_unlock(&ci->fsm.lock);
1768}
1769
1754/** 1770/**
1755 * ci_udc_stop: unregister a gadget driver 1771 * ci_udc_stop: unregister a gadget driver
1756 */ 1772 */
@@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
1775 ci->driver = NULL; 1791 ci->driver = NULL;
1776 spin_unlock_irqrestore(&ci->lock, flags); 1792 spin_unlock_irqrestore(&ci->lock, flags);
1777 1793
1794 ci_udc_stop_for_otg_fsm(ci);
1778 return 0; 1795 return 0;
1779} 1796}
1780 1797
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index fcea4eb36eee..ab8b027e8cc8 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
500{ 500{
501 struct resource *res; 501 struct resource *res;
502 struct imx_usbmisc *data; 502 struct imx_usbmisc *data;
503 struct of_device_id *tmp_dev; 503 const struct of_device_id *of_id;
504
505 of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
506 if (!of_id)
507 return -ENODEV;
504 508
505 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 509 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
506 if (!data) 510 if (!data)
@@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
513 if (IS_ERR(data->base)) 517 if (IS_ERR(data->base))
514 return PTR_ERR(data->base); 518 return PTR_ERR(data->base);
515 519
516 tmp_dev = (struct of_device_id *) 520 data->ops = (const struct usbmisc_ops *)of_id->data;
517 of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
518 data->ops = (const struct usbmisc_ops *)tmp_dev->data;
519 platform_set_drvdata(pdev, data); 521 platform_set_drvdata(pdev, data);
520 522
521 return 0; 523 return 0;
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 433bbc34a8a4..071964c7847f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
884 884
885 add_wait_queue(&usblp->wwait, &waita); 885 add_wait_queue(&usblp->wwait, &waita);
886 for (;;) { 886 for (;;) {
887 set_current_state(TASK_INTERRUPTIBLE);
888 if (mutex_lock_interruptible(&usblp->mut)) { 887 if (mutex_lock_interruptible(&usblp->mut)) {
889 rc = -EINTR; 888 rc = -EINTR;
890 break; 889 break;
891 } 890 }
891 set_current_state(TASK_INTERRUPTIBLE);
892 rc = usblp_wtest(usblp, nonblock); 892 rc = usblp_wtest(usblp, nonblock);
893 mutex_unlock(&usblp->mut); 893 mutex_unlock(&usblp->mut);
894 if (rc <= 0) 894 if (rc <= 0)
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index a99c89e78126..dd280108758f 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB
77 77
78config USB_OTG_FSM 78config USB_OTG_FSM
79 tristate "USB 2.0 OTG FSM implementation" 79 tristate "USB 2.0 OTG FSM implementation"
80 depends on USB 80 depends on USB && USB_OTG
81 select USB_OTG
82 select USB_PHY 81 select USB_PHY
83 help 82 help
84 Implements OTG Finite State Machine as specified in On-The-Go 83 Implements OTG Finite State Machine as specified in On-The-Go
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index e79baf73c234..571c21727ff9 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
324 */ 324 */
325static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) 325static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
326{ 326{
327 if (hsotg->lx_state == DWC2_L2) { 327 if (hsotg->bus_suspended) {
328 hsotg->flags.b.port_suspend_change = 1; 328 hsotg->flags.b.port_suspend_change = 1;
329 usb_hcd_resume_root_hub(hsotg->priv); 329 usb_hcd_resume_root_hub(hsotg->priv);
330 } else {
331 hsotg->flags.b.port_l1_change = 1;
332 } 330 }
331
332 if (hsotg->lx_state == DWC2_L1)
333 hsotg->flags.b.port_l1_change = 1;
333} 334}
334 335
335/** 336/**
@@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data)
1428 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", 1429 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
1429 dwc2_readl(hsotg->regs + HPRT0)); 1430 dwc2_readl(hsotg->regs + HPRT0));
1430 1431
1431 hsotg->bus_suspended = 0;
1432 dwc2_hcd_rem_wakeup(hsotg); 1432 dwc2_hcd_rem_wakeup(hsotg);
1433 hsotg->bus_suspended = 0;
1433 1434
1434 /* Change to L0 state */ 1435 /* Change to L0 state */
1435 hsotg->lx_state = DWC2_L0; 1436 hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 5859b0fa19ee..e61d773cf65e 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = {
108 .host_ls_low_power_phy_clk = -1, 108 .host_ls_low_power_phy_clk = -1,
109 .ts_dline = -1, 109 .ts_dline = -1,
110 .reload_ctl = -1, 110 .reload_ctl = -1,
111 .ahbcfg = 0x7, /* INCR16 */ 111 .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
112 GAHBCFG_HBSTLEN_SHIFT,
112 .uframe_sched = -1, 113 .uframe_sched = -1,
113 .external_id_pin_ctl = -1, 114 .external_id_pin_ctl = -1,
114 .hibernation = -1, 115 .hibernation = -1,
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 77a622cb48ab..009d83048c8c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,8 @@
34#define PCI_DEVICE_ID_INTEL_BSW 0x22b7 34#define PCI_DEVICE_ID_INTEL_BSW 0x22b7
35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
38#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
37 39
38static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 40static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
39static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; 41static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
210 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
211 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 213 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
213 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 217 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
214 { } /* Terminating Entry */ 218 { } /* Terminating Entry */
215}; 219};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 55ba447fdf8b..e24a01cc98df 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2744,12 +2744,34 @@ int dwc3_gadget_init(struct dwc3 *dwc)
2744 } 2744 }
2745 2745
2746 dwc->gadget.ops = &dwc3_gadget_ops; 2746 dwc->gadget.ops = &dwc3_gadget_ops;
2747 dwc->gadget.max_speed = USB_SPEED_SUPER;
2748 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2747 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2749 dwc->gadget.sg_supported = true; 2748 dwc->gadget.sg_supported = true;
2750 dwc->gadget.name = "dwc3-gadget"; 2749 dwc->gadget.name = "dwc3-gadget";
2751 2750
2752 /* 2751 /*
2752	 * FIXME: We may be setting max_speed below SUPER; however, versions
2753	 * of dwc3 earlier than 2.20a have a metastability issue (documented
2754	 * elsewhere in this driver) which means we can't set max speed to
2755	 * anything lower than SUPER.
2756	 *
2757	 * Because gadget.max_speed is only used by composite.c and function
2758	 * drivers (i.e. it won't go into dwc3's registers), we allow this
2759	 * so that we avoid sending a SuperSpeed Capability descriptor
2760	 * together with our BOS descriptor, as that could confuse the host
2761	 * into thinking we can handle SuperSpeed.
2762	 *
2763	 * Note that, in fact, we won't even support GetBOS requests when the
2764	 * speed is less than SuperSpeed, because we don't yet have the means
2765	 * to tell composite.c that we are USB 2.0 + LPM ECN.
2766	 */
2767 if (dwc->revision < DWC3_REVISION_220A)
2768 dwc3_trace(trace_dwc3_gadget,
2769 "Changing max_speed on rev %08x\n",
2770 dwc->revision);
2771
2772 dwc->gadget.max_speed = dwc->maximum_speed;
2773
2774 /*
2753 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2775 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2754 * on ep out. 2776 * on ep out.
2755 */ 2777 */
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 23933bdf2d9d..ddc3aad886b7 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
329 for (i = 0; i < loop->qlen && result == 0; i++) { 329 for (i = 0; i < loop->qlen && result == 0; i++) {
330 result = -ENOMEM; 330 result = -ENOMEM;
331 331
332 in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL); 332 in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC);
333 if (!in_req) 333 if (!in_req)
334 goto fail; 334 goto fail;
335 335
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f0f2b066ac08..f92f5aff0dd5 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
1633 spin_lock(&udc->lock); 1633 spin_lock(&udc->lock);
1634 1634
1635 int_enb = usba_int_enb_get(udc); 1635 int_enb = usba_int_enb_get(udc);
1636 status = usba_readl(udc, INT_STA) & int_enb; 1636 status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
1637 DBG(DBG_INT, "irq, status=%#08x\n", status); 1637 DBG(DBG_INT, "irq, status=%#08x\n", status);
1638 1638
1639 if (status & USBA_DET_SUSPEND) { 1639 if (status & USBA_DET_SUSPEND) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5d2d7e954bd4..0230965fb78c 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
782 status |= USB_PORT_STAT_SUSPEND; 782 status |= USB_PORT_STAT_SUSPEND;
783 } 783 }
784 } 784 }
785 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 785 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
786 && (raw_port_status & PORT_POWER) 786 (raw_port_status & PORT_POWER)) {
787 && (bus_state->suspended_ports & (1 << wIndex))) { 787 if (bus_state->suspended_ports & (1 << wIndex)) {
788 bus_state->suspended_ports &= ~(1 << wIndex); 788 bus_state->suspended_ports &= ~(1 << wIndex);
789 if (hcd->speed < HCD_USB3) 789 if (hcd->speed < HCD_USB3)
790 bus_state->port_c_suspend |= 1 << wIndex; 790 bus_state->port_c_suspend |= 1 << wIndex;
791 }
792 bus_state->resume_done[wIndex] = 0;
793 clear_bit(wIndex, &bus_state->resuming_ports);
791 } 794 }
792 if (raw_port_status & PORT_CONNECT) { 795 if (raw_port_status & PORT_CONNECT) {
793 status |= USB_PORT_STAT_CONNECTION; 796 status |= USB_PORT_STAT_CONNECTION;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fa836251ca21..6c5e8133cf87 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3896,28 +3896,6 @@ cleanup:
3896 return ret; 3896 return ret;
3897} 3897}
3898 3898
3899static int ep_ring_is_processing(struct xhci_hcd *xhci,
3900 int slot_id, unsigned int ep_index)
3901{
3902 struct xhci_virt_device *xdev;
3903 struct xhci_ring *ep_ring;
3904 struct xhci_ep_ctx *ep_ctx;
3905 struct xhci_virt_ep *xep;
3906 dma_addr_t hw_deq;
3907
3908 xdev = xhci->devs[slot_id];
3909 xep = &xhci->devs[slot_id]->eps[ep_index];
3910 ep_ring = xep->ring;
3911 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3912
3913 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
3914 return 0;
3915
3916 hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
3917 return (hw_deq !=
3918 xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
3919}
3920
3921/* 3899/*
3922 * Check transfer ring to guarantee there is enough room for the urb. 3900 * Check transfer ring to guarantee there is enough room for the urb.
3923 * Update ISO URB start_frame and interval. 3901 * Update ISO URB start_frame and interval.
@@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3983 } 3961 }
3984 3962
3985 /* Calculate the start frame and put it in urb->start_frame. */ 3963 /* Calculate the start frame and put it in urb->start_frame. */
3986 if (HCC_CFC(xhci->hcc_params) && 3964 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3987 ep_ring_is_processing(xhci, slot_id, ep_index)) { 3965 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3988 urb->start_frame = xep->next_frame_id; 3966 EP_STATE_RUNNING) {
3989 goto skip_start_over; 3967 urb->start_frame = xep->next_frame_id;
3968 goto skip_start_over;
3969 }
3990 } 3970 }
3991 3971
3992 start_frame = readl(&xhci->run_regs->microframe_index); 3972 start_frame = readl(&xhci->run_regs->microframe_index);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6e7dc6f93978..dfa44d3e8eee 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
175 command |= CMD_RESET; 175 command |= CMD_RESET;
176 writel(command, &xhci->op_regs->command); 176 writel(command, &xhci->op_regs->command);
177 177
178	/* Existing Intel xHCI controllers require a delay of 1 ms
179	 * after setting the CMD_RESET bit and before accessing any
180	 * HC registers. This allows the HC to complete the
181	 * reset operation and be ready for HC register access.
182	 * Without this delay, the subsequent HC register access
183	 * can, very rarely, result in a system hang.
184 */
185 if (xhci->quirks & XHCI_INTEL_HOST)
186 udelay(1000);
187
178 ret = xhci_handshake(&xhci->op_regs->command, 188 ret = xhci_handshake(&xhci->op_regs->command,
179 CMD_RESET, 0, 10 * 1000 * 1000); 189 CMD_RESET, 0, 10 * 1000 * 1000);
180 if (ret) 190 if (ret)
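The Intel quirk above inserts a fixed grace period between asserting CMD_RESET and the first handshake read. The same quirk-gated-delay-then-poll shape can be written with the generic iopoll helper; the sketch below is illustrative only — the register bit, quirk flag, and function are invented for the example, not xHCI definitions.

/*
 * Sketch: quirk-gated delay followed by a polled handshake. All
 * EXAMPLE_* names are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_CMD_RESET	BIT(1)
#define EXAMPLE_QUIRK_DELAY	BIT(0)

static int example_reset(void __iomem *cmd_reg, unsigned int quirks)
{
	u32 val;

	writel(readl(cmd_reg) | EXAMPLE_CMD_RESET, cmd_reg);

	/* Some controllers need quiet time before any register access. */
	if (quirks & EXAMPLE_QUIRK_DELAY)
		udelay(1000);

	/* Poll until the controller clears the reset bit; 10 s timeout. */
	return readl_poll_timeout(cmd_reg, val,
				  !(val & EXAMPLE_CMD_RESET),
				  1, 10 * 1000 * 1000);
}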
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ba13529cbd52..18cfc0a361cb 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
132/*-------------------------------------------------------------------------*/ 132/*-------------------------------------------------------------------------*/
133 133
134#ifndef CONFIG_BLACKFIN 134#ifndef CONFIG_BLACKFIN
135static int musb_ulpi_read(struct usb_phy *phy, u32 offset) 135static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
136{ 136{
137 void __iomem *addr = phy->io_priv; 137 void __iomem *addr = phy->io_priv;
138 int i = 0; 138 int i = 0;
@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
151 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. 151 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
152 */ 152 */
153 153
154 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 154 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
155 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, 155 musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
156 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); 156 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
157 157
@@ -176,7 +176,7 @@ out:
176 return ret; 176 return ret;
177} 177}
178 178
179static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) 179static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
180{ 180{
181 void __iomem *addr = phy->io_priv; 181 void __iomem *addr = phy->io_priv;
182 int i = 0; 182 int i = 0;
@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
191 power &= ~MUSB_POWER_SUSPENDM; 191 power &= ~MUSB_POWER_SUSPENDM;
192 musb_writeb(addr, MUSB_POWER, power); 192 musb_writeb(addr, MUSB_POWER, power);
193 193
194 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 194 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
195 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); 195 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
196 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); 196 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
197 197
198 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 198 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
@@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt);
1668static bool use_dma = 1; 1668static bool use_dma = 1;
1669 1669
1670/* "modprobe ... use_dma=0" etc */ 1670/* "modprobe ... use_dma=0" etc */
1671module_param(use_dma, bool, 0); 1671module_param(use_dma, bool, 0644);
1672MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); 1672MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1673 1673
1674void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) 1674void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 26c65e66cc0f..795a45b1b25b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112 struct musb *musb = ep->musb; 112 struct musb *musb = ep->musb;
113 void __iomem *epio = ep->regs; 113 void __iomem *epio = ep->regs;
114 u16 csr; 114 u16 csr;
115 u16 lastcsr = 0;
116 int retries = 1000; 115 int retries = 1000;
117 116
118 csr = musb_readw(epio, MUSB_TXCSR); 117 csr = musb_readw(epio, MUSB_TXCSR);
119 while (csr & MUSB_TXCSR_FIFONOTEMPTY) { 118 while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
120 if (csr != lastcsr)
121 dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
122 lastcsr = csr;
123 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; 119 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
124 musb_writew(epio, MUSB_TXCSR, csr); 120 musb_writew(epio, MUSB_TXCSR, csr);
125 csr = musb_readw(epio, MUSB_TXCSR); 121 csr = musb_readw(epio, MUSB_TXCSR);
126 if (WARN(retries-- < 1, 122
123		/*
124		 * FIXME: the tx fifo flush sometimes fails; this has been
125		 * observed during device disconnect on AM335x.
126		 *
127		 * To reproduce the issue, ensure tx urb(s) are queued when
128		 * unplugging the usb device connected to the AM335x usb
129		 * host port.
130		 *
131		 * Using a usb-ethernet device and running iperf (client on
132		 * AM335x) has a very high chance of triggering it.
133		 *
134		 * It helps to turn on dev_dbg() in musb_cleanup_urb() with
135		 * CPPI enabled to see the issue when aborting the tx channel.
136		 */
137 if (dev_WARN_ONCE(musb->controller, retries-- < 1,
127 "Could not flush host TX%d fifo: csr: %04x\n", 138 "Could not flush host TX%d fifo: csr: %04x\n",
128 ep->epnum, csr)) 139 ep->epnum, csr))
129 return; 140 return;
130 mdelay(1);
131 } 141 }
132} 142}
133 143
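The reworked flush loop above drops the per-iteration dev_dbg() chatter and the mdelay(), relying on dev_WARN_ONCE() to flag exhaustion exactly once. As a standalone pattern — bounded retries on a busy register, a single warning on give-up — it reduces to the sketch below; the register layout and busy bit are hypothetical.

/*
 * Sketch: bounded retry on a busy register with a single warning on
 * exhaustion. EXAMPLE_BUSY and the register are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>

#define EXAMPLE_BUSY	BIT(0)

static void example_wait_idle(struct device *dev, void __iomem *reg)
{
	int retries = 1000;
	u32 val = readl(reg);

	while (val & EXAMPLE_BUSY) {
		val = readl(reg);
		/* Warn once and bail out rather than spinning forever. */
		if (dev_WARN_ONCE(dev, retries-- < 1,
				  "device stuck busy: reg=%08x\n", val))
			return;
	}
}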
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 173132416170..22e8ecb6bfbd 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,6 @@ config AB8500_USB
21config FSL_USB2_OTG 21config FSL_USB2_OTG
22 bool "Freescale USB OTG Transceiver Driver" 22 bool "Freescale USB OTG Transceiver Driver"
23 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 23 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
24 select USB_OTG
25 select USB_PHY 24 select USB_PHY
26 help 25 help
27 Enable this to support Freescale USB OTG transceiver. 26 Enable this to support Freescale USB OTG transceiver.
@@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY
168 167
169config USB_MV_OTG 168config USB_MV_OTG
170 tristate "Marvell USB OTG support" 169 tristate "Marvell USB OTG support"
171 depends on USB_EHCI_MV && USB_MV_UDC && PM 170 depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
172 select USB_OTG
173 select USB_PHY 171 select USB_PHY
174 help 172 help
175 Say Y here if you want to build Marvell USB OTG transceiver 173
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 4d863ebc117c..b7536af777ab 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev)
452 struct clk *clk; 452 struct clk *clk;
453 struct mxs_phy *mxs_phy; 453 struct mxs_phy *mxs_phy;
454 int ret; 454 int ret;
455 const struct of_device_id *of_id = 455 const struct of_device_id *of_id;
456 of_match_device(mxs_phy_dt_ids, &pdev->dev);
457 struct device_node *np = pdev->dev.of_node; 456 struct device_node *np = pdev->dev.of_node;
458 457
458 of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
459 if (!of_id)
460 return -ENODEV;
461
459 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 462 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
460 base = devm_ioremap_resource(&pdev->dev, res); 463 base = devm_ioremap_resource(&pdev->dev, res);
461 if (IS_ERR(base)) 464 if (IS_ERR(base))
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 1270906ccb95..c4bf2de6d14e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev)
105 extcon = extcon_get_extcon_dev(config->extcon); 105 extcon = extcon_get_extcon_dev(config->extcon);
106 if (!extcon) 106 if (!extcon)
107 return -EPROBE_DEFER; 107 return -EPROBE_DEFER;
108 otg_dev->extcon = extcon;
109 108
110 otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); 109 otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
111 if (!otg_dev) 110 if (!otg_dev)
@@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev)
115 if (IS_ERR(otg_dev->base)) 114 if (IS_ERR(otg_dev->base))
116 return PTR_ERR(otg_dev->base); 115 return PTR_ERR(otg_dev->base);
117 116
117 otg_dev->extcon = extcon;
118 otg_dev->id_nb.notifier_call = omap_otg_id_notifier; 118 otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
119 otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; 119 otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
120 120
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 685fef71d3d1..f2280606b73c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
161#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 161#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
162#define NOVATELWIRELESS_PRODUCT_E362 0x9010 162#define NOVATELWIRELESS_PRODUCT_E362 0x9010
163#define NOVATELWIRELESS_PRODUCT_E371 0x9011 163#define NOVATELWIRELESS_PRODUCT_E371 0x9011
164#define NOVATELWIRELESS_PRODUCT_U620L 0x9022
164#define NOVATELWIRELESS_PRODUCT_G2 0xA010 165#define NOVATELWIRELESS_PRODUCT_G2 0xA010
165#define NOVATELWIRELESS_PRODUCT_MC551 0xB001 166#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
166 167
@@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb);
354/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * 355/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
355 * It seems to contain a Qualcomm QSC6240/6290 chipset */ 356 * It seems to contain a Qualcomm QSC6240/6290 chipset */
356#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 357#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
358#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
357 359
358/* iBall 3.5G connect wireless modem */ 360/* iBall 3.5G connect wireless modem */
359#define IBALL_3_5G_CONNECT 0x9605 361#define IBALL_3_5G_CONNECT 0x9605
@@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
519 .sendsetup = BIT(0) | BIT(1), 521 .sendsetup = BIT(0) | BIT(1),
520}; 522};
521 523
524static const struct option_blacklist_info four_g_w100_blacklist = {
525 .sendsetup = BIT(1) | BIT(2),
526 .reserved = BIT(3),
527};
528
522static const struct option_blacklist_info alcatel_x200_blacklist = { 529static const struct option_blacklist_info alcatel_x200_blacklist = {
523 .sendsetup = BIT(0) | BIT(1), 530 .sendsetup = BIT(0) | BIT(1),
524 .reserved = BIT(4), 531 .reserved = BIT(4),
@@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = {
1052 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, 1059 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
1053 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, 1060 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
1054 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, 1061 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
1062 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
1055 1063
1056 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 1064 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
1057 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 1065 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = {
1641 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 1649 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
1642 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 1650 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
1643 }, 1651 },
1652 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
1653 .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
1654 },
1644 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, 1655 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
1645 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, 1656 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
1646 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, 1657 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 5022fcfa0260..9919d2a9faf2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -22,6 +22,8 @@
22#define DRIVER_AUTHOR "Qualcomm Inc" 22#define DRIVER_AUTHOR "Qualcomm Inc"
23#define DRIVER_DESC "Qualcomm USB Serial driver" 23#define DRIVER_DESC "Qualcomm USB Serial driver"
24 24
25#define QUECTEL_EC20_PID 0x9215
26
25/* standard device layouts supported by this driver */ 27/* standard device layouts supported by this driver */
26enum qcserial_layouts { 28enum qcserial_layouts {
27 QCSERIAL_G2K = 0, /* Gobi 2000 */ 29 QCSERIAL_G2K = 0, /* Gobi 2000 */
@@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = {
171}; 173};
172MODULE_DEVICE_TABLE(usb, id_table); 174MODULE_DEVICE_TABLE(usb, id_table);
173 175
176static int handle_quectel_ec20(struct device *dev, int ifnum)
177{
178 int altsetting = 0;
179
180 /*
181 * Quectel EC20 Mini PCIe LTE module layout:
182 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
183 * 1: NMEA
184 * 2: AT-capable modem port
185 * 3: Modem interface
186 * 4: NDIS
187 */
188 switch (ifnum) {
189 case 0:
190 dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
191 break;
192 case 1:
193 dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
194 break;
195 case 2:
196 case 3:
197 dev_dbg(dev, "Quectel EC20 Modem port found\n");
198 break;
199 case 4:
200 /* Don't claim the QMI/net interface */
201 altsetting = -1;
202 break;
203 }
204
205 return altsetting;
206}
207
174static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) 208static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
175{ 209{
176 struct usb_host_interface *intf = serial->interface->cur_altsetting; 210 struct usb_host_interface *intf = serial->interface->cur_altsetting;
@@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
181 int altsetting = -1; 215 int altsetting = -1;
182 bool sendsetup = false; 216 bool sendsetup = false;
183 217
218 /* we only support vendor specific functions */
219 if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
220 goto done;
221
184 nintf = serial->dev->actconfig->desc.bNumInterfaces; 222 nintf = serial->dev->actconfig->desc.bNumInterfaces;
185 dev_dbg(dev, "Num Interfaces = %d\n", nintf); 223 dev_dbg(dev, "Num Interfaces = %d\n", nintf);
186 ifnum = intf->desc.bInterfaceNumber; 224 ifnum = intf->desc.bInterfaceNumber;
@@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
240 altsetting = -1; 278 altsetting = -1;
241 break; 279 break;
242 case QCSERIAL_G2K: 280 case QCSERIAL_G2K:
281 /* handle non-standard layouts */
282 if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
283 altsetting = handle_quectel_ec20(dev, ifnum);
284 goto done;
285 }
286
243 /* 287 /*
244 * Gobi 2K+ USB layout: 288 * Gobi 2K+ USB layout:
245 * 0: QMI/net 289 * 0: QMI/net
@@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
301 break; 345 break;
302 case QCSERIAL_HWI: 346 case QCSERIAL_HWI:
303 /* 347 /*
304 * Huawei layout: 348 * Huawei devices map functions by subclass + protocol
305 * 0: AT-capable modem port 349 * instead of interface numbers. The protocol identifies
306 * 1: DM/DIAG 350 * a specific function, while the subclass indicates a
307 * 2: AT-capable modem port 351 * specific firmware source.
308 * 3: CCID-compatible PCSC interface 352 *
309 * 4: QMI/net 353 * This is a blacklist of functions known to be
310 * 5: NMEA 354 * non-serial. The rest are assumed to be serial and
355 * will be handled by this driver.
311 */ 356 */
312 switch (ifnum) { 357 switch (intf->desc.bInterfaceProtocol) {
313 case 0: 358 /* QMI combined (qmi_wwan) */
314 case 2: 359 case 0x07:
315 dev_dbg(dev, "Modem port found\n"); 360 case 0x37:
316 break; 361 case 0x67:
317 case 1: 362 /* QMI data (qmi_wwan) */
318 dev_dbg(dev, "DM/DIAG interface found\n"); 363 case 0x08:
319 break; 364 case 0x38:
320 case 5: 365 case 0x68:
321 dev_dbg(dev, "NMEA GPS interface found\n"); 366 /* QMI control (qmi_wwan) */
322 break; 367 case 0x09:
323 default: 368 case 0x39:
324 /* don't claim any unsupported interface */ 369 case 0x69:
370 /* NCM like (huawei_cdc_ncm) */
371 case 0x16:
372 case 0x46:
373 case 0x76:
325 altsetting = -1; 374 altsetting = -1;
326 break; 375 break;
376 default:
377 dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
378 intf->desc.bInterfaceClass,
379 intf->desc.bInterfaceSubClass,
380 intf->desc.bInterfaceProtocol);
327 } 381 }
328 break; 382 break;
329 default: 383 default:
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e9da41d9fe7f..2694df2f4559 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
159 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, 159 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
160 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 160 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
161 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 161 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
162 { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
162 { } /* terminator */ 163 { } /* terminator */
163}; 164};
164 165
@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
191 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, 193 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
195 { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
194 { } /* terminator */ 196 { } /* terminator */
195}; 197};
196 198
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 4a2423e84d55..98f35c656c02 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -56,6 +56,10 @@
56#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID 56#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
57#define ABBOTT_STRIP_PORT_ID 0x3420 57#define ABBOTT_STRIP_PORT_ID 0x3420
58 58
59/* Honeywell vendor and product IDs */
60#define HONEYWELL_VENDOR_ID 0x10ac
61#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
62
59/* Commands */ 63/* Commands */
60#define TI_GET_VERSION 0x01 64#define TI_GET_VERSION 0x01
61#define TI_GET_PORT_STATUS 0x02 65#define TI_GET_PORT_STATUS 0x02
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7a8a6c6952e9..1c427beffadd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG
446 446
447config IMX2_WDT 447config IMX2_WDT
448 tristate "IMX2+ Watchdog" 448 tristate "IMX2+ Watchdog"
449 depends on ARCH_MXC 449 depends on ARCH_MXC || ARCH_LAYERSCAPE
450 select REGMAP_MMIO 450 select REGMAP_MMIO
451 select WATCHDOG_CORE 451 select WATCHDOG_CORE
452 help 452 help
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 6ad9df948711..b751f43d76ed 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
123 123
124 reg = readl(wdt_base + WDT_MODE); 124 reg = readl(wdt_base + WDT_MODE);
125 reg &= ~WDT_MODE_EN; 125 reg &= ~WDT_MODE_EN;
126 reg |= WDT_MODE_KEY;
126 iowrite32(reg, wdt_base + WDT_MODE); 127 iowrite32(reg, wdt_base + WDT_MODE);
127 128
128 return 0; 129 return 0;
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index d96bee017fd3..6f17c935a6cf 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
205 205
206static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) 206static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog)
207{ 207{
208 struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); 208 struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
209 void __iomem *base = wdev->base; 209 void __iomem *base = wdev->base;
210 u32 value; 210 u32 value;
211 211
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 4224b3ec83a5..313cd1c6fda0 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT;
80 80
81static DEFINE_SPINLOCK(io_lock); 81static DEFINE_SPINLOCK(io_lock);
82static void __iomem *wdt_base; 82static void __iomem *wdt_base;
83struct clk *wdt_clk; 83static struct clk *wdt_clk;
84 84
85static int pnx4008_wdt_start(struct watchdog_device *wdd) 85static int pnx4008_wdt_start(struct watchdog_device *wdd)
86{ 86{
@@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
161 if (IS_ERR(wdt_clk)) 161 if (IS_ERR(wdt_clk))
162 return PTR_ERR(wdt_clk); 162 return PTR_ERR(wdt_clk);
163 163
164 ret = clk_enable(wdt_clk); 164 ret = clk_prepare_enable(wdt_clk);
165 if (ret) 165 if (ret)
166 return ret; 166 return ret;
167 167
@@ -184,7 +184,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
184 return 0; 184 return 0;
185 185
186disable_clk: 186disable_clk:
187 clk_disable(wdt_clk); 187 clk_disable_unprepare(wdt_clk);
188 return ret; 188 return ret;
189} 189}
190 190
@@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev)
192{ 192{
193 watchdog_unregister_device(&pnx4008_wdd); 193 watchdog_unregister_device(&pnx4008_wdd);
194 194
195 clk_disable(wdt_clk); 195 clk_disable_unprepare(wdt_clk);
196 196
197 return 0; 197 return 0;
198} 198}
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 7f97cdd53f29..9ec57608da82 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
140{ 140{
141 wdd->timeout = timeout; 141 wdd->timeout = timeout;
142 142
143 if (watchdog_active(wdd)) 143 if (watchdog_active(wdd)) {
144 tegra_wdt_stop(wdd);
144 return tegra_wdt_start(wdd); 145 return tegra_wdt_start(wdd);
146 }
145 147
146 return 0; 148 return 0;
147} 149}
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 91bf55a20024..20e2bba10400 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -224,7 +224,7 @@ static int wdt_keepalive(void)
224 224
225static int wdt_set_timeout(int t) 225static int wdt_set_timeout(int t)
226{ 226{
227 int tmrval; 227 unsigned int tmrval;
228 228
229 /* 229 /*
230 * Convert seconds to watchdog counter time units, rounding up. 230 * Convert seconds to watchdog counter time units, rounding up.
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 849500e4e14d..524c22146429 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -39,6 +39,7 @@
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/idle.h> 40#include <asm/idle.h>
41#include <asm/io_apic.h> 41#include <asm/io_apic.h>
42#include <asm/i8259.h>
42#include <asm/xen/pci.h> 43#include <asm/xen/pci.h>
43#endif 44#endif
44#include <asm/sync_bitops.h> 45#include <asm/sync_bitops.h>
@@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
420 return xen_allocate_irq_dynamic(); 421 return xen_allocate_irq_dynamic();
421 422
422 /* Legacy IRQ descriptors are already allocated by the arch. */ 423 /* Legacy IRQ descriptors are already allocated by the arch. */
423 if (gsi < NR_IRQS_LEGACY) 424 if (gsi < nr_legacy_irqs())
424 irq = gsi; 425 irq = gsi;
425 else 426 else
426 irq = irq_alloc_desc_at(gsi, -1); 427 irq = irq_alloc_desc_at(gsi, -1);
@@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq)
446 kfree(info); 447 kfree(info);
447 448
448 /* Legacy IRQ descriptors are managed by the arch. */ 449 /* Legacy IRQ descriptors are managed by the arch. */
449 if (irq < NR_IRQS_LEGACY) 450 if (irq < nr_legacy_irqs())
450 return; 451 return;
451 452
452 irq_free_desc(irq); 453 irq_free_desc(irq);
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 00f40f051d95..38272ad24551 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -49,6 +49,8 @@
49#include <linux/init.h> 49#include <linux/init.h>
50#include <linux/mutex.h> 50#include <linux/mutex.h>
51#include <linux/cpu.h> 51#include <linux/cpu.h>
52#include <linux/mm.h>
53#include <linux/vmalloc.h>
52 54
53#include <xen/xen.h> 55#include <xen/xen.h>
54#include <xen/events.h> 56#include <xen/events.h>
@@ -58,10 +60,10 @@
58struct per_user_data { 60struct per_user_data {
59 struct mutex bind_mutex; /* serialize bind/unbind operations */ 61 struct mutex bind_mutex; /* serialize bind/unbind operations */
60 struct rb_root evtchns; 62 struct rb_root evtchns;
63 unsigned int nr_evtchns;
61 64
62 /* Notification ring, accessed via /dev/xen/evtchn. */ 65 /* Notification ring, accessed via /dev/xen/evtchn. */
63#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) 66 unsigned int ring_size;
64#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
65 evtchn_port_t *ring; 67 evtchn_port_t *ring;
66 unsigned int ring_cons, ring_prod, ring_overflow; 68 unsigned int ring_cons, ring_prod, ring_overflow;
67 struct mutex ring_cons_mutex; /* protect against concurrent readers */ 69 struct mutex ring_cons_mutex; /* protect against concurrent readers */
@@ -80,10 +82,41 @@ struct user_evtchn {
80 bool enabled; 82 bool enabled;
81}; 83};
82 84
85static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
86{
87 evtchn_port_t *ring;
88 size_t s = size * sizeof(*ring);
89
90 ring = kmalloc(s, GFP_KERNEL);
91 if (!ring)
92 ring = vmalloc(s);
93
94 return ring;
95}
96
97static void evtchn_free_ring(evtchn_port_t *ring)
98{
99 kvfree(ring);
100}
101
102static unsigned int evtchn_ring_offset(struct per_user_data *u,
103 unsigned int idx)
104{
105 return idx & (u->ring_size - 1);
106}
107
108static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
109 unsigned int idx)
110{
111 return u->ring + evtchn_ring_offset(u, idx);
112}
113
83static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) 114static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
84{ 115{
85 struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; 116 struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
86 117
118 u->nr_evtchns++;
119
87 while (*new) { 120 while (*new) {
88 struct user_evtchn *this; 121 struct user_evtchn *this;
89 122
@@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
107 140
108static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) 141static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
109{ 142{
143 u->nr_evtchns--;
110 rb_erase(&evtchn->node, &u->evtchns); 144 rb_erase(&evtchn->node, &u->evtchns);
111 kfree(evtchn); 145 kfree(evtchn);
112} 146}
@@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
144 178
145 spin_lock(&u->ring_prod_lock); 179 spin_lock(&u->ring_prod_lock);
146 180
147 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { 181 if ((u->ring_prod - u->ring_cons) < u->ring_size) {
148 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; 182 *evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
149 wmb(); /* Ensure ring contents visible */ 183 wmb(); /* Ensure ring contents visible */
150 if (u->ring_cons == u->ring_prod++) { 184 if (u->ring_cons == u->ring_prod++) {
151 wake_up_interruptible(&u->evtchn_wait); 185 wake_up_interruptible(&u->evtchn_wait);
@@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
200 } 234 }
201 235
202 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ 236 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
203 if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { 237 if (((c ^ p) & u->ring_size) != 0) {
204 bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * 238 bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
205 sizeof(evtchn_port_t); 239 sizeof(evtchn_port_t);
206 bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); 240 bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
207 } else { 241 } else {
208 bytes1 = (p - c) * sizeof(evtchn_port_t); 242 bytes1 = (p - c) * sizeof(evtchn_port_t);
209 bytes2 = 0; 243 bytes2 = 0;
@@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
219 253
220 rc = -EFAULT; 254 rc = -EFAULT;
221 rmb(); /* Ensure that we see the port before we copy it. */ 255 rmb(); /* Ensure that we see the port before we copy it. */
222 if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || 256 if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
223 ((bytes2 != 0) && 257 ((bytes2 != 0) &&
224 copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) 258 copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
225 goto unlock_out; 259 goto unlock_out;
@@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
278 return rc; 312 return rc;
279} 313}
280 314
315static int evtchn_resize_ring(struct per_user_data *u)
316{
317 unsigned int new_size;
318 evtchn_port_t *new_ring, *old_ring;
319 unsigned int p, c;
320
321 /*
322 * Ensure the ring is large enough to capture all possible
323 * events, i.e. one free slot for each bound event.
324 */
325 if (u->nr_evtchns <= u->ring_size)
326 return 0;
327
328 if (u->ring_size == 0)
329 new_size = 64;
330 else
331 new_size = 2 * u->ring_size;
332
333 new_ring = evtchn_alloc_ring(new_size);
334 if (!new_ring)
335 return -ENOMEM;
336
337 old_ring = u->ring;
338
339 /*
340 * Access to the ring contents is serialized by either the
341 * prod /or/ cons lock so take both when resizing.
342 */
343 mutex_lock(&u->ring_cons_mutex);
344 spin_lock_irq(&u->ring_prod_lock);
345
346 /*
347 * Copy the old ring contents to the new ring.
348 *
349 * If the ring contents cross the end of the current ring,
350 * they need to be copied in two chunks.
351 *
352 * +---------+ +------------------+
353 * |34567 12| -> | 1234567 |
354 * +-----p-c-+ +------------------+
355 */
356 p = evtchn_ring_offset(u, u->ring_prod);
357 c = evtchn_ring_offset(u, u->ring_cons);
358 if (p < c) {
359 memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
360 memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
361 } else
362 memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
363
364 u->ring = new_ring;
365 u->ring_size = new_size;
366
367 spin_unlock_irq(&u->ring_prod_lock);
368 mutex_unlock(&u->ring_cons_mutex);
369
370 evtchn_free_ring(old_ring);
371
372 return 0;
373}
374
281static int evtchn_bind_to_user(struct per_user_data *u, int port) 375static int evtchn_bind_to_user(struct per_user_data *u, int port)
282{ 376{
283 struct user_evtchn *evtchn; 377 struct user_evtchn *evtchn;
@@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
305 if (rc < 0) 399 if (rc < 0)
306 goto err; 400 goto err;
307 401
402 rc = evtchn_resize_ring(u);
403 if (rc < 0)
404 goto err;
405
308 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, 406 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
309 u->name, evtchn); 407 u->name, evtchn);
310 if (rc < 0) 408 if (rc < 0)
@@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp)
503 601
504 init_waitqueue_head(&u->evtchn_wait); 602 init_waitqueue_head(&u->evtchn_wait);
505 603
506 u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
507 if (u->ring == NULL) {
508 kfree(u->name);
509 kfree(u);
510 return -ENOMEM;
511 }
512
513 mutex_init(&u->bind_mutex); 604 mutex_init(&u->bind_mutex);
514 mutex_init(&u->ring_cons_mutex); 605 mutex_init(&u->ring_cons_mutex);
515 spin_lock_init(&u->ring_prod_lock); 606 spin_lock_init(&u->ring_prod_lock);
@@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp)
532 evtchn_unbind_from_user(u, evtchn); 623 evtchn_unbind_from_user(u, evtchn);
533 } 624 }
534 625
535 free_page((unsigned long)u->ring); 626 evtchn_free_ring(u->ring);
536 kfree(u->name); 627 kfree(u->name);
537 kfree(u); 628 kfree(u);
538 629
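The resizable ring above keeps the classic unmasked-index convention: ring_prod and ring_cons increment without bound, sizes stay powers of two (64, 128, ...), and a mask recovers the slot. That is also why the read path can test ((c ^ p) & u->ring_size): with free-running indices, that bit differs exactly when the two offsets sit on opposite sides of a wrap. A minimal, self-contained sketch of the convention (illustrative only, not the evtchn structures):

/*
 * Sketch: free-running producer/consumer indices over a
 * power-of-two ring.
 */
static inline unsigned int ring_offset(unsigned int idx, unsigned int size)
{
	return idx & (size - 1);	/* size must be a power of two */
}

/* Entries currently queued; correct across unsigned wraparound. */
static inline unsigned int ring_used(unsigned int prod, unsigned int cons)
{
	return prod - cons;		/* valid while used <= size */
}

static inline int ring_full(unsigned int prod, unsigned int cons,
			    unsigned int size)
{
	return ring_used(prod, cons) >= size;
}

Doubling on demand preserves the power-of-two invariant, which is also what keeps the two-chunk copy in evtchn_resize_ring() straightforward.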
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ea0b3b2a91d..1be5dd048622 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
804 804
805 vma->vm_ops = &gntdev_vmops; 805 vma->vm_ops = &gntdev_vmops;
806 806
807 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 807 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
808 808
809 if (use_ptemod) 809 if (use_ptemod)
810 vma->vm_flags |= VM_DONTCOPY; 810 vma->vm_flags |= VM_DONTCOPY;
diff --git a/fs/Kconfig b/fs/Kconfig
index da3f32f1a4e4..6ce72d8d1ee1 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -46,6 +46,12 @@ config FS_DAX
46 or if unsure, say N. Saying Y will increase the size of the kernel 46 or if unsure, say N. Saying Y will increase the size of the kernel
47 by about 5kB. 47 by about 5kB.
48 48
49config FS_DAX_PMD
50 bool
51 default FS_DAX
52 depends on FS_DAX
53 depends on BROKEN
54
49endif # BLOCK 55endif # BLOCK
50 56
51# Posix ACL utility routines 57# Posix ACL utility routines
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bb0dfb1c7af1..c25639e907bd 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
390 struct page *page) 390 struct page *page)
391{ 391{
392 const struct block_device_operations *ops = bdev->bd_disk->fops; 392 const struct block_device_operations *ops = bdev->bd_disk->fops;
393 int result = -EOPNOTSUPP;
394
393 if (!ops->rw_page || bdev_get_integrity(bdev)) 395 if (!ops->rw_page || bdev_get_integrity(bdev))
394 return -EOPNOTSUPP; 396 return result;
395 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); 397
398 result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
399 if (result)
400 return result;
401 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
402 blk_queue_exit(bdev->bd_queue);
403 return result;
396} 404}
397EXPORT_SYMBOL_GPL(bdev_read_page); 405EXPORT_SYMBOL_GPL(bdev_read_page);
398 406
@@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
421 int result; 429 int result;
422 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; 430 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
423 const struct block_device_operations *ops = bdev->bd_disk->fops; 431 const struct block_device_operations *ops = bdev->bd_disk->fops;
432
424 if (!ops->rw_page || bdev_get_integrity(bdev)) 433 if (!ops->rw_page || bdev_get_integrity(bdev))
425 return -EOPNOTSUPP; 434 return -EOPNOTSUPP;
435 result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
436 if (result)
437 return result;
438
426 set_page_writeback(page); 439 set_page_writeback(page);
427 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); 440 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
428 if (result) 441 if (result)
429 end_page_writeback(page); 442 end_page_writeback(page);
430 else 443 else
431 unlock_page(page); 444 unlock_page(page);
445 blk_queue_exit(bdev->bd_queue);
432 return result; 446 return result;
433} 447}
434EXPORT_SYMBOL_GPL(bdev_write_page); 448EXPORT_SYMBOL_GPL(bdev_write_page);
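Both rw_page paths now take a reference on the request queue before calling into the driver and drop it afterwards, so the call can no longer race with queue teardown during hot-unplug. As a pattern it reduces to the guard below — a sketch only, with a hypothetical example_do_io() standing in for ops->rw_page().

/*
 * Sketch: guard a driver callback with a request-queue reference.
 * example_do_io() is a hypothetical stand-in for ops->rw_page().
 */
#include <linux/blkdev.h>

static int example_do_io(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	return 0;	/* placeholder for the real driver callback */
}

static int example_guarded_io(struct block_device *bdev, sector_t sector,
			      struct page *page)
{
	int ret;

	/* Fails if the queue is dying, e.g. the device is being removed. */
	ret = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
	if (ret)
		return ret;

	ret = example_do_io(bdev, sector, page);

	blk_queue_exit(bdev->bd_queue);
	return ret;
}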
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 6dcdb2ec9211..d453d62ab0c6 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -355,7 +355,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
355 355
356 index = srcu_read_lock(&fs_info->subvol_srcu); 356 index = srcu_read_lock(&fs_info->subvol_srcu);
357 357
358 root = btrfs_read_fs_root_no_name(fs_info, &root_key); 358 root = btrfs_get_fs_root(fs_info, &root_key, false);
359 if (IS_ERR(root)) { 359 if (IS_ERR(root)) {
360 srcu_read_unlock(&fs_info->subvol_srcu, index); 360 srcu_read_unlock(&fs_info->subvol_srcu, index);
361 ret = PTR_ERR(root); 361 ret = PTR_ERR(root);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8c58191249cc..35489e7129a7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3416,6 +3416,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3416struct btrfs_block_group_cache *btrfs_lookup_block_group( 3416struct btrfs_block_group_cache *btrfs_lookup_block_group(
3417 struct btrfs_fs_info *info, 3417 struct btrfs_fs_info *info,
3418 u64 bytenr); 3418 u64 bytenr);
3419void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
3419void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3420void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
3420int get_block_group_index(struct btrfs_block_group_cache *cache); 3421int get_block_group_index(struct btrfs_block_group_cache *cache);
3421struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 3422struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
@@ -3479,6 +3480,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3479 struct btrfs_root *root, u64 bytes_used, 3480 struct btrfs_root *root, u64 bytes_used,
3480 u64 type, u64 chunk_objectid, u64 chunk_offset, 3481 u64 type, u64 chunk_objectid, u64 chunk_offset,
3481 u64 size); 3482 u64 size);
3483struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
3484 struct btrfs_fs_info *fs_info,
3485 const u64 chunk_offset);
3482int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 3486int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
3483 struct btrfs_root *root, u64 group_start, 3487 struct btrfs_root *root, u64 group_start,
3484 struct extent_map *em); 3488 struct extent_map *em);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index acf3ed11cfb6..4b89680a1923 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -124,7 +124,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 return (cache->flags & bits) == bits; 124 return (cache->flags & bits) == bits;
125} 125}
126 126
127static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) 127void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
128{ 128{
129 atomic_inc(&cache->count); 129 atomic_inc(&cache->count);
130} 130}
@@ -5915,19 +5915,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
5915 set_extent_dirty(info->pinned_extents, 5915 set_extent_dirty(info->pinned_extents,
5916 bytenr, bytenr + num_bytes - 1, 5916 bytenr, bytenr + num_bytes - 1,
5917 GFP_NOFS | __GFP_NOFAIL); 5917 GFP_NOFS | __GFP_NOFAIL);
5918 /*
5919 * No longer have used bytes in this block group, queue
5920 * it for deletion.
5921 */
5922 if (old_val == 0) {
5923 spin_lock(&info->unused_bgs_lock);
5924 if (list_empty(&cache->bg_list)) {
5925 btrfs_get_block_group(cache);
5926 list_add_tail(&cache->bg_list,
5927 &info->unused_bgs);
5928 }
5929 spin_unlock(&info->unused_bgs_lock);
5930 }
5931 } 5918 }
5932 5919
5933 spin_lock(&trans->transaction->dirty_bgs_lock); 5920 spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -5939,6 +5926,22 @@ static int update_block_group(struct btrfs_trans_handle *trans,
5939 } 5926 }
5940 spin_unlock(&trans->transaction->dirty_bgs_lock); 5927 spin_unlock(&trans->transaction->dirty_bgs_lock);
5941 5928
5929 /*
5930 * No longer have used bytes in this block group, queue it for
5931 * deletion. We do this after adding the block group to the
5932 * dirty list to avoid races between cleaner kthread and space
5933 * cache writeout.
5934 */
5935 if (!alloc && old_val == 0) {
5936 spin_lock(&info->unused_bgs_lock);
5937 if (list_empty(&cache->bg_list)) {
5938 btrfs_get_block_group(cache);
5939 list_add_tail(&cache->bg_list,
5940 &info->unused_bgs);
5941 }
5942 spin_unlock(&info->unused_bgs_lock);
5943 }
5944
5942 btrfs_put_block_group(cache); 5945 btrfs_put_block_group(cache);
5943 total -= num_bytes; 5946 total -= num_bytes;
5944 bytenr += num_bytes; 5947 bytenr += num_bytes;
@@ -8105,21 +8108,47 @@ reada:
8105} 8108}
8106 8109
8107/* 8110/*
8108 * TODO: Modify related function to add related node/leaf to dirty_extent_root, 8111 * These may not be seen by the usual inc/dec ref code so we have to
8109 * for later qgroup accounting. 8112 * add them here.
8110 *
8111 * Current, this function does nothing.
8112 */ 8113 */
8114static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8115 struct btrfs_root *root, u64 bytenr,
8116 u64 num_bytes)
8117{
8118 struct btrfs_qgroup_extent_record *qrecord;
8119 struct btrfs_delayed_ref_root *delayed_refs;
8120
8121 qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8122 if (!qrecord)
8123 return -ENOMEM;
8124
8125 qrecord->bytenr = bytenr;
8126 qrecord->num_bytes = num_bytes;
8127 qrecord->old_roots = NULL;
8128
8129 delayed_refs = &trans->transaction->delayed_refs;
8130 spin_lock(&delayed_refs->lock);
8131 if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8132 kfree(qrecord);
8133 spin_unlock(&delayed_refs->lock);
8134
8135 return 0;
8136}
8137
8113static int account_leaf_items(struct btrfs_trans_handle *trans, 8138static int account_leaf_items(struct btrfs_trans_handle *trans,
8114 struct btrfs_root *root, 8139 struct btrfs_root *root,
8115 struct extent_buffer *eb) 8140 struct extent_buffer *eb)
8116{ 8141{
8117 int nr = btrfs_header_nritems(eb); 8142 int nr = btrfs_header_nritems(eb);
8118 int i, extent_type; 8143 int i, extent_type, ret;
8119 struct btrfs_key key; 8144 struct btrfs_key key;
8120 struct btrfs_file_extent_item *fi; 8145 struct btrfs_file_extent_item *fi;
8121 u64 bytenr, num_bytes; 8146 u64 bytenr, num_bytes;
8122 8147
8148 /* We can be called directly from walk_up_proc() */
8149 if (!root->fs_info->quota_enabled)
8150 return 0;
8151
8123 for (i = 0; i < nr; i++) { 8152 for (i = 0; i < nr; i++) {
8124 btrfs_item_key_to_cpu(eb, &key, i); 8153 btrfs_item_key_to_cpu(eb, &key, i);
8125 8154
@@ -8138,6 +8167,10 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
8138 continue; 8167 continue;
8139 8168
8140 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); 8169 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8170
8171 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8172 if (ret)
8173 return ret;
8141 } 8174 }
8142 return 0; 8175 return 0;
8143} 8176}
@@ -8206,8 +8239,6 @@ static int adjust_slots_upwards(struct btrfs_root *root,
8206 8239
8207/* 8240/*
8208 * root_eb is the subtree root and is locked before this function is called. 8241 * root_eb is the subtree root and is locked before this function is called.
8209 * TODO: Modify this function to mark all (including complete shared node)
8210 * to dirty_extent_root to allow it get accounted in qgroup.
8211 */ 8242 */
8212static int account_shared_subtree(struct btrfs_trans_handle *trans, 8243static int account_shared_subtree(struct btrfs_trans_handle *trans,
8213 struct btrfs_root *root, 8244 struct btrfs_root *root,
@@ -8285,6 +8316,11 @@ walk_down:
8285 btrfs_tree_read_lock(eb); 8316 btrfs_tree_read_lock(eb);
8286 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 8317 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8287 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; 8318 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8319
8320 ret = record_one_subtree_extent(trans, root, child_bytenr,
8321 root->nodesize);
8322 if (ret)
8323 goto out;
8288 } 8324 }
8289 8325
8290 if (level == 0) { 8326 if (level == 0) {
@@ -10256,6 +10292,47 @@ out:
10256 return ret; 10292 return ret;
10257} 10293}
10258 10294
10295struct btrfs_trans_handle *
10296btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10297 const u64 chunk_offset)
10298{
10299 struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10300 struct extent_map *em;
10301 struct map_lookup *map;
10302 unsigned int num_items;
10303
10304 read_lock(&em_tree->lock);
10305 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10306 read_unlock(&em_tree->lock);
10307 ASSERT(em && em->start == chunk_offset);
10308
10309 /*
10310 * We need to reserve 3 + N units from the metadata space info in order
10311 * to remove a block group (done at btrfs_remove_chunk() and at
10312 * btrfs_remove_block_group()), which are used for:
10313 *
10314 * 1 unit for adding the free space inode's orphan (located in the tree
10315 * of tree roots).
10316 * 1 unit for deleting the block group item (located in the extent
10317 * tree).
10318 * 1 unit for deleting the free space item (located in tree of tree
10319 * roots).
10320 * N units for deleting N device extent items corresponding to each
10321 * stripe (located in the device tree).
10322 *
10323 * In order to remove a block group we also need to reserve units in the
10324 * system space info in order to update the chunk tree (update one or
10325 * more device items and remove one chunk item), but this is done at
10326 * btrfs_remove_chunk() through a call to check_system_chunk().
10327 */
10328 map = (struct map_lookup *)em->bdev;
10329 num_items = 3 + map->num_stripes;
10330 free_extent_map(em);
10331
10332 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10333 num_items, 1);
10334}
10335
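As a worked example of the "3 + N units" arithmetic in the comment above, here is a small self-contained userspace program; the helper name is invented for illustration and is not part of the patch:

#include <stdio.h>

/* 1 unit for the free space inode's orphan item,
 * 1 unit for the block group item,
 * 1 unit for the free space item,
 * plus one device extent item per stripe. */
static unsigned int remove_bg_metadata_units(unsigned int num_stripes)
{
        return 3 + num_stripes;
}

int main(void)
{
        /* e.g. a chunk striped over 4 devices needs 3 + 4 = 7 units */
        printf("%u\n", remove_bg_metadata_units(4));
        return 0;
}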
10259/* 10336/*
10260 * Process the unused_bgs list and remove any that don't have any allocated 10337 * Process the unused_bgs list and remove any that don't have any allocated
10261 * space inside of them. 10338 * space inside of them.
@@ -10322,8 +10399,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10322 * Want to do this before we do anything else so we can recover 10399 * Want to do this before we do anything else so we can recover
10323 * properly if we fail to join the transaction. 10400 * properly if we fail to join the transaction.
10324 */ 10401 */
10325 /* 1 for btrfs_orphan_reserve_metadata() */ 10402 trans = btrfs_start_trans_remove_block_group(fs_info,
10326 trans = btrfs_start_transaction(root, 1); 10403 block_group->key.objectid);
10327 if (IS_ERR(trans)) { 10404 if (IS_ERR(trans)) {
10328 btrfs_dec_block_group_ro(root, block_group); 10405 btrfs_dec_block_group_ro(root, block_group);
10329 ret = PTR_ERR(trans); 10406 ret = PTR_ERR(trans);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 977e715f0bf2..72e73461c064 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1882,8 +1882,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1882 struct btrfs_log_ctx ctx; 1882 struct btrfs_log_ctx ctx;
1883 int ret = 0; 1883 int ret = 0;
1884 bool full_sync = 0; 1884 bool full_sync = 0;
1885 const u64 len = end - start + 1; 1885 u64 len;
1886 1886
1887 /*
 1888 * The range length can be represented by u64, but we have to do the
 1889 * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync()
1890 */
1891 len = (u64)end - (u64)start + 1;
1887 trace_btrfs_sync_file(file, datasync); 1892 trace_btrfs_sync_file(file, datasync);
1888 1893
1889 /* 1894 /*
@@ -2071,8 +2076,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2071 } 2076 }
2072 } 2077 }
2073 if (!full_sync) { 2078 if (!full_sync) {
2074 ret = btrfs_wait_ordered_range(inode, start, 2079 ret = btrfs_wait_ordered_range(inode, start, len);
2075 end - start + 1);
2076 if (ret) { 2080 if (ret) {
2077 btrfs_end_transaction(trans, root); 2081 btrfs_end_transaction(trans, root);
2078 goto out; 2082 goto out;
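The typecast comment in the btrfs_sync_file() hunk above is easy to verify in userspace. A minimal demo of the signed-overflow hazard for the [0, LLONG_MAX] range (plain long long stands in for loff_t here):

#include <stdio.h>
#include <limits.h>

int main(void)
{
        long long start = 0, end = LLONG_MAX;   /* what fsync() passes down */

        /* end - start + 1 evaluated in the signed type would overflow
         * (undefined behaviour); casting to unsigned first makes the
         * arithmetic well defined. */
        unsigned long long len =
                (unsigned long long)end - (unsigned long long)start + 1;

        printf("len = %llu\n", len);            /* LLONG_MAX + 1, as intended */
        return 0;
}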
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 994490d5fa64..a70c5790f8f5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4046,9 +4046,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4046 */ 4046 */
4047static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 4047static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4048{ 4048{
4049 struct btrfs_trans_handle *trans;
4050 struct btrfs_root *root = BTRFS_I(dir)->root; 4049 struct btrfs_root *root = BTRFS_I(dir)->root;
4051 int ret;
4052 4050
4053 /* 4051 /*
4054 * 1 for the possible orphan item 4052 * 1 for the possible orphan item
@@ -4057,27 +4055,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4057 * 1 for the inode ref 4055 * 1 for the inode ref
4058 * 1 for the inode 4056 * 1 for the inode
4059 */ 4057 */
4060 trans = btrfs_start_transaction(root, 5); 4058 return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4061 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
4062 return trans;
4063
4064 if (PTR_ERR(trans) == -ENOSPC) {
4065 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4066
4067 trans = btrfs_start_transaction(root, 0);
4068 if (IS_ERR(trans))
4069 return trans;
4070 ret = btrfs_cond_migrate_bytes(root->fs_info,
4071 &root->fs_info->trans_block_rsv,
4072 num_bytes, 5);
4073 if (ret) {
4074 btrfs_end_transaction(trans, root);
4075 return ERR_PTR(ret);
4076 }
4077 trans->block_rsv = &root->fs_info->trans_block_rsv;
4078 trans->bytes_reserved = num_bytes;
4079 }
4080 return trans;
4081} 4059}
4082 4060
4083static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4061static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 93e12c18ffd7..5279fdae7142 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -993,9 +993,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
993 mutex_lock(&fs_info->qgroup_ioctl_lock); 993 mutex_lock(&fs_info->qgroup_ioctl_lock);
994 if (!fs_info->quota_root) 994 if (!fs_info->quota_root)
995 goto out; 995 goto out;
996 spin_lock(&fs_info->qgroup_lock);
997 fs_info->quota_enabled = 0; 996 fs_info->quota_enabled = 0;
998 fs_info->pending_quota_state = 0; 997 fs_info->pending_quota_state = 0;
998 btrfs_qgroup_wait_for_completion(fs_info);
999 spin_lock(&fs_info->qgroup_lock);
999 quota_root = fs_info->quota_root; 1000 quota_root = fs_info->quota_root;
1000 fs_info->quota_root = NULL; 1001 fs_info->quota_root = NULL;
1001 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1002 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -1461,6 +1462,8 @@ struct btrfs_qgroup_extent_record
1461 struct btrfs_qgroup_extent_record *entry; 1462 struct btrfs_qgroup_extent_record *entry;
1462 u64 bytenr = record->bytenr; 1463 u64 bytenr = record->bytenr;
1463 1464
1465 assert_spin_locked(&delayed_refs->lock);
1466
1464 while (*p) { 1467 while (*p) {
1465 parent_node = *p; 1468 parent_node = *p;
1466 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, 1469 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2907a77fb1f6..b091d94ceef6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3432,7 +3432,9 @@ out:
3432static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 3432static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3433 struct btrfs_device *scrub_dev, 3433 struct btrfs_device *scrub_dev,
3434 u64 chunk_offset, u64 length, 3434 u64 chunk_offset, u64 length,
3435 u64 dev_offset, int is_dev_replace) 3435 u64 dev_offset,
3436 struct btrfs_block_group_cache *cache,
3437 int is_dev_replace)
3436{ 3438{
3437 struct btrfs_mapping_tree *map_tree = 3439 struct btrfs_mapping_tree *map_tree =
3438 &sctx->dev_root->fs_info->mapping_tree; 3440 &sctx->dev_root->fs_info->mapping_tree;
@@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3445 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); 3447 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3446 read_unlock(&map_tree->map_tree.lock); 3448 read_unlock(&map_tree->map_tree.lock);
3447 3449
3448 if (!em) 3450 if (!em) {
3449 return -EINVAL; 3451 /*
3452 * Might have been an unused block group deleted by the cleaner
3453 * kthread or relocation.
3454 */
3455 spin_lock(&cache->lock);
3456 if (!cache->removed)
3457 ret = -EINVAL;
3458 spin_unlock(&cache->lock);
3459
3460 return ret;
3461 }
3450 3462
3451 map = (struct map_lookup *)em->bdev; 3463 map = (struct map_lookup *)em->bdev;
3452 if (em->start != chunk_offset) 3464 if (em->start != chunk_offset)
@@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3483 u64 length; 3495 u64 length;
3484 u64 chunk_offset; 3496 u64 chunk_offset;
3485 int ret = 0; 3497 int ret = 0;
3498 int ro_set;
3486 int slot; 3499 int slot;
3487 struct extent_buffer *l; 3500 struct extent_buffer *l;
3488 struct btrfs_key key; 3501 struct btrfs_key key;
@@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3568 scrub_pause_on(fs_info); 3581 scrub_pause_on(fs_info);
3569 ret = btrfs_inc_block_group_ro(root, cache); 3582 ret = btrfs_inc_block_group_ro(root, cache);
3570 scrub_pause_off(fs_info); 3583 scrub_pause_off(fs_info);
3571 if (ret) { 3584
3585 if (ret == 0) {
3586 ro_set = 1;
3587 } else if (ret == -ENOSPC) {
3588 /*
3589 * btrfs_inc_block_group_ro return -ENOSPC when it
3590 * failed in creating new chunk for metadata.
3591 * It is not a problem for scrub/replace, because
3592 * metadata are always cowed, and our scrub paused
3593 * commit_transactions.
3594 */
3595 ro_set = 0;
3596 } else {
 3597 btrfs_warn(fs_info, "failed setting block group ro, ret=%d",
3598 ret);
3572 btrfs_put_block_group(cache); 3599 btrfs_put_block_group(cache);
3573 break; 3600 break;
3574 } 3601 }
@@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3577 dev_replace->cursor_left = found_key.offset; 3604 dev_replace->cursor_left = found_key.offset;
3578 dev_replace->item_needs_writeback = 1; 3605 dev_replace->item_needs_writeback = 1;
3579 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, 3606 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3580 found_key.offset, is_dev_replace); 3607 found_key.offset, cache, is_dev_replace);
3581 3608
3582 /* 3609 /*
3583 * flush, submit all pending read and write bios, afterwards 3610 * flush, submit all pending read and write bios, afterwards
@@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3611 3638
3612 scrub_pause_off(fs_info); 3639 scrub_pause_off(fs_info);
3613 3640
3614 btrfs_dec_block_group_ro(root, cache); 3641 if (ro_set)
3642 btrfs_dec_block_group_ro(root, cache);
3643
3644 /*
3645 * We might have prevented the cleaner kthread from deleting
3646 * this block group if it was already unused because we raced
3647 * and set it to RO mode first. So add it back to the unused
3648 * list, otherwise it might not ever be deleted unless a manual
3649 * balance is triggered or it becomes used and unused again.
3650 */
3651 spin_lock(&cache->lock);
3652 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3653 btrfs_block_group_used(&cache->item) == 0) {
3654 spin_unlock(&cache->lock);
3655 spin_lock(&fs_info->unused_bgs_lock);
3656 if (list_empty(&cache->bg_list)) {
3657 btrfs_get_block_group(cache);
3658 list_add_tail(&cache->bg_list,
3659 &fs_info->unused_bgs);
3660 }
3661 spin_unlock(&fs_info->unused_bgs_lock);
3662 } else {
3663 spin_unlock(&cache->lock);
3664 }
3615 3665
3616 btrfs_put_block_group(cache); 3666 btrfs_put_block_group(cache);
3617 if (ret) 3667 if (ret)
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c8c3d70c31ff..8b72b005bfb9 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -898,8 +898,10 @@ int btrfs_test_free_space_cache(void)
898 } 898 }
899 899
900 root = btrfs_alloc_dummy_root(); 900 root = btrfs_alloc_dummy_root();
901 if (!root) 901 if (IS_ERR(root)) {
902 ret = PTR_ERR(root);
902 goto out; 903 goto out;
904 }
903 905
904 root->fs_info = btrfs_alloc_dummy_fs_info(); 906 root->fs_info = btrfs_alloc_dummy_fs_info();
905 if (!root->fs_info) 907 if (!root->fs_info)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 418c6a2ad7d8..3367a3c6f214 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -592,6 +592,38 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
592 return start_transaction(root, num_items, TRANS_START, 592 return start_transaction(root, num_items, TRANS_START,
593 BTRFS_RESERVE_FLUSH_ALL); 593 BTRFS_RESERVE_FLUSH_ALL);
594} 594}
595struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
596 struct btrfs_root *root,
597 unsigned int num_items,
598 int min_factor)
599{
600 struct btrfs_trans_handle *trans;
601 u64 num_bytes;
602 int ret;
603
604 trans = btrfs_start_transaction(root, num_items);
605 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
606 return trans;
607
608 trans = btrfs_start_transaction(root, 0);
609 if (IS_ERR(trans))
610 return trans;
611
612 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
613 ret = btrfs_cond_migrate_bytes(root->fs_info,
614 &root->fs_info->trans_block_rsv,
615 num_bytes,
616 min_factor);
617 if (ret) {
618 btrfs_end_transaction(trans, root);
619 return ERR_PTR(ret);
620 }
621
622 trans->block_rsv = &root->fs_info->trans_block_rsv;
623 trans->bytes_reserved = num_bytes;
624
625 return trans;
626}
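A hedged sketch of a caller, mirroring how __unlink_start_trans() in the fs/btrfs/inode.c hunk above now uses this helper (error handling trimmed; not a complete function):

        /* Try a normal reservation of 5 items first; on -ENOSPC the helper
         * falls back to migrating bytes from the global reserve, gated by
         * min_factor = 5 (see btrfs_cond_migrate_bytes() above). */
        trans = btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        /* ... metadata updates ... */
        btrfs_end_transaction(trans, root);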
595 627
596struct btrfs_trans_handle *btrfs_start_transaction_lflush( 628struct btrfs_trans_handle *btrfs_start_transaction_lflush(
597 struct btrfs_root *root, 629 struct btrfs_root *root,
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index b05b2f64d913..0da21ca9b3fb 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -185,6 +185,10 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans,
185 struct btrfs_root *root); 185 struct btrfs_root *root);
186struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 186struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
187 unsigned int num_items); 187 unsigned int num_items);
188struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
189 struct btrfs_root *root,
190 unsigned int num_items,
191 int min_factor);
188struct btrfs_trans_handle *btrfs_start_transaction_lflush( 192struct btrfs_trans_handle *btrfs_start_transaction_lflush(
189 struct btrfs_root *root, 193 struct btrfs_root *root,
190 unsigned int num_items); 194 unsigned int num_items);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a6df8fdc1312..456452206609 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1973,8 +1973,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1973 if (srcdev->writeable) { 1973 if (srcdev->writeable) {
1974 fs_devices->rw_devices--; 1974 fs_devices->rw_devices--;
1975 /* zero out the old super if it is writable */ 1975 /* zero out the old super if it is writable */
1976 btrfs_scratch_superblocks(srcdev->bdev, 1976 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
1977 rcu_str_deref(srcdev->name));
1978 } 1977 }
1979 1978
1980 if (srcdev->bdev) 1979 if (srcdev->bdev)
@@ -2024,8 +2023,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2024 btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); 2023 btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2025 2024
2026 if (tgtdev->bdev) { 2025 if (tgtdev->bdev) {
2027 btrfs_scratch_superblocks(tgtdev->bdev, 2026 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2028 rcu_str_deref(tgtdev->name));
2029 fs_info->fs_devices->open_devices--; 2027 fs_info->fs_devices->open_devices--;
2030 } 2028 }
2031 fs_info->fs_devices->num_devices--; 2029 fs_info->fs_devices->num_devices--;
@@ -2853,7 +2851,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2853 if (ret) 2851 if (ret)
2854 return ret; 2852 return ret;
2855 2853
2856 trans = btrfs_start_transaction(root, 0); 2854 trans = btrfs_start_trans_remove_block_group(root->fs_info,
2855 chunk_offset);
2857 if (IS_ERR(trans)) { 2856 if (IS_ERR(trans)) {
2858 ret = PTR_ERR(trans); 2857 ret = PTR_ERR(trans);
2859 btrfs_std_error(root->fs_info, ret, NULL); 2858 btrfs_std_error(root->fs_info, ret, NULL);
@@ -3123,7 +3122,7 @@ static int chunk_profiles_filter(u64 chunk_type,
3123 return 1; 3122 return 1;
3124} 3123}
3125 3124
3126static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3125static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3127 struct btrfs_balance_args *bargs) 3126 struct btrfs_balance_args *bargs)
3128{ 3127{
3129 struct btrfs_block_group_cache *cache; 3128 struct btrfs_block_group_cache *cache;
@@ -3156,7 +3155,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3156 return ret; 3155 return ret;
3157} 3156}
3158 3157
3159static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, 3158static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3160 u64 chunk_offset, struct btrfs_balance_args *bargs) 3159 u64 chunk_offset, struct btrfs_balance_args *bargs)
3161{ 3160{
3162 struct btrfs_block_group_cache *cache; 3161 struct btrfs_block_group_cache *cache;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ec5712372732..d5c84f6b1353 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -382,7 +382,7 @@ struct map_lookup {
382#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) 382#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
383#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) 383#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6)
384#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) 384#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7)
385#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 8) 385#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10)
386 386
387#define BTRFS_BALANCE_ARGS_MASK \ 387#define BTRFS_BALANCE_ARGS_MASK \
388 (BTRFS_BALANCE_ARGS_PROFILES | \ 388 (BTRFS_BALANCE_ARGS_PROFILES | \
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 7a6b02f72787..c0f3da3926a0 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -879,7 +879,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
879 loff_t pos, eof; 879 loff_t pos, eof;
880 size_t len; 880 size_t len;
881 void *data; 881 void *data;
882 int ret; 882 int ret = -ENOBUFS;
883 883
884 ASSERT(op != NULL); 884 ASSERT(op != NULL);
885 ASSERT(page != NULL); 885 ASSERT(page != NULL);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index c81ce7f200a6..a7a1b218f308 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = {
1636 .iterate = configfs_readdir, 1636 .iterate = configfs_readdir,
1637}; 1637};
1638 1638
1639/**
1640 * configfs_register_group - creates a parent-child relation between two groups
1641 * @parent_group: parent group
1642 * @group: child group
1643 *
 1644 * Links the groups, creates a dentry for the child and attaches it to
 1645 * the parent dentry.
1646 *
1647 * Return: 0 on success, negative errno code on error
1648 */
1649int configfs_register_group(struct config_group *parent_group,
1650 struct config_group *group)
1651{
1652 struct configfs_subsystem *subsys = parent_group->cg_subsys;
1653 struct dentry *parent;
1654 int ret;
1655
1656 mutex_lock(&subsys->su_mutex);
1657 link_group(parent_group, group);
1658 mutex_unlock(&subsys->su_mutex);
1659
1660 parent = parent_group->cg_item.ci_dentry;
1661
1662 mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
1663 ret = create_default_group(parent_group, group);
1664 if (!ret) {
1665 spin_lock(&configfs_dirent_lock);
1666 configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1667 spin_unlock(&configfs_dirent_lock);
1668 }
1669 mutex_unlock(&d_inode(parent)->i_mutex);
1670 return ret;
1671}
1672EXPORT_SYMBOL(configfs_register_group);
1673
1674/**
1675 * configfs_unregister_group() - unregisters a child group from its parent
 1676 * @group: child group to be unregistered
1677 *
1678 * Undoes configfs_register_group()
1679 */
1680void configfs_unregister_group(struct config_group *group)
1681{
1682 struct configfs_subsystem *subsys = group->cg_subsys;
1683 struct dentry *dentry = group->cg_item.ci_dentry;
1684 struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
1685
1686 mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
1687 spin_lock(&configfs_dirent_lock);
1688 configfs_detach_prep(dentry, NULL);
1689 spin_unlock(&configfs_dirent_lock);
1690
1691 configfs_detach_group(&group->cg_item);
1692 d_inode(dentry)->i_flags |= S_DEAD;
1693 dont_mount(dentry);
1694 d_delete(dentry);
1695 mutex_unlock(&d_inode(parent)->i_mutex);
1696
1697 dput(dentry);
1698
1699 mutex_lock(&subsys->su_mutex);
1700 unlink_group(group);
1701 mutex_unlock(&subsys->su_mutex);
1702}
1703EXPORT_SYMBOL(configfs_unregister_group);
1704
1705/**
1706 * configfs_register_default_group() - allocates and registers a child group
1707 * @parent_group: parent group
1708 * @name: child group name
1709 * @item_type: child item type description
1710 *
 1711 * Boilerplate to allocate and register a child group with its parent. We
 1712 * need kzalloc'ed memory because the child's default_group is initially empty.
1713 *
1714 * Return: allocated config group or ERR_PTR() on error
1715 */
1716struct config_group *
1717configfs_register_default_group(struct config_group *parent_group,
1718 const char *name,
1719 struct config_item_type *item_type)
1720{
1721 int ret;
1722 struct config_group *group;
1723
1724 group = kzalloc(sizeof(*group), GFP_KERNEL);
1725 if (!group)
1726 return ERR_PTR(-ENOMEM);
1727 config_group_init_type_name(group, name, item_type);
1728
1729 ret = configfs_register_group(parent_group, group);
1730 if (ret) {
1731 kfree(group);
1732 return ERR_PTR(ret);
1733 }
1734 return group;
1735}
1736EXPORT_SYMBOL(configfs_register_default_group);
1737
1738/**
1739 * configfs_unregister_default_group() - unregisters and frees a child group
1740 * @group: the group to act on
1741 */
1742void configfs_unregister_default_group(struct config_group *group)
1743{
1744 configfs_unregister_group(group);
1745 kfree(group);
1746}
1747EXPORT_SYMBOL(configfs_unregister_default_group);
1748
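A brief, hypothetical usage sketch for the new default-group helpers above; the "my_*" names are invented for illustration and are not part of this patch:

#include <linux/configfs.h>
#include <linux/err.h>

static struct config_item_type my_features_type;        /* hypothetical */
static struct config_group *my_features_group;

static int my_driver_setup(struct config_group *parent)
{
        /* allocates, names and links the child group under parent */
        my_features_group = configfs_register_default_group(parent,
                                        "features", &my_features_type);
        return PTR_ERR_OR_ZERO(my_features_group);
}

static void my_driver_teardown(void)
{
        /* undoes the registration and frees the kzalloc'ed group */
        configfs_unregister_default_group(my_features_group);
}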
1639int configfs_register_subsystem(struct configfs_subsystem *subsys) 1749int configfs_register_subsystem(struct configfs_subsystem *subsys)
1640{ 1750{
1641 int err; 1751 int err;
diff --git a/fs/dax.c b/fs/dax.c
index d1e5cb7311a1..43671b68220e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
541 unsigned long pfn; 541 unsigned long pfn;
542 int result = 0; 542 int result = 0;
543 543
544 /* dax pmd mappings are broken wrt gup and fork */
545 if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
546 return VM_FAULT_FALLBACK;
547
544 /* Fall back to PTEs if we're going to COW */ 548 /* Fall back to PTEs if we're going to COW */
545 if (write && !(vma->vm_flags & VM_SHARED)) 549 if (write && !(vma->vm_flags & VM_SHARED))
546 return VM_FAULT_FALLBACK; 550 return VM_FAULT_FALLBACK;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index cb5337d8c273..1c75a3a07f8f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1169,6 +1169,15 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1169 } 1169 }
1170 } 1170 }
1171 1171
 1172 /* Once we have sampled i_size, check for reads beyond EOF */
1173 dio->i_size = i_size_read(inode);
1174 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1175 if (dio->flags & DIO_LOCKING)
1176 mutex_unlock(&inode->i_mutex);
1177 kmem_cache_free(dio_cache, dio);
1178 goto out;
1179 }
1180
1172 /* 1181 /*
1173 * For file extending writes updating i_size before data writeouts 1182 * For file extending writes updating i_size before data writeouts
1174 * complete can expose uninitialized blocks in dumb filesystems. 1183 * complete can expose uninitialized blocks in dumb filesystems.
@@ -1222,7 +1231,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1222 sdio.next_block_for_io = -1; 1231 sdio.next_block_for_io = -1;
1223 1232
1224 dio->iocb = iocb; 1233 dio->iocb = iocb;
1225 dio->i_size = i_size_read(inode);
1226 1234
1227 spin_lock_init(&dio->bio_lock); 1235 spin_lock_init(&dio->bio_lock);
1228 dio->refcount = 1; 1236 dio->refcount = 1;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 87e9d796cf7d..3a37bd3f9637 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -421,7 +421,7 @@ static void lowcomms_write_space(struct sock *sk)
421 421
422 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { 422 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
423 con->sock->sk->sk_write_pending--; 423 con->sock->sk->sk_write_pending--;
424 clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); 424 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
425 } 425 }
426 426
427 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) 427 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
@@ -1448,7 +1448,7 @@ static void send_to_sock(struct connection *con)
1448 msg_flags); 1448 msg_flags);
1449 if (ret == -EAGAIN || ret == 0) { 1449 if (ret == -EAGAIN || ret == 0) {
1450 if (ret == -EAGAIN && 1450 if (ret == -EAGAIN &&
1451 test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && 1451 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1452 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { 1452 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1453 /* Notify TCP that we're limited by the 1453 /* Notify TCP that we're limited by the
1454 * application window size. 1454 * application window size.
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3a71cea68420..748d35afc902 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb)
569 /* Fall through */ 569 /* Fall through */
570 case Opt_dax: 570 case Opt_dax:
571#ifdef CONFIG_FS_DAX 571#ifdef CONFIG_FS_DAX
572 ext2_msg(sb, KERN_WARNING,
573 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
572 set_opt(sbi->s_mount_opt, DAX); 574 set_opt(sbi->s_mount_opt, DAX);
573#else 575#else
574 ext2_msg(sb, KERN_INFO, "dax option not supported"); 576 ext2_msg(sb, KERN_INFO, "dax option not supported");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 753f4e68b820..c9ab67da6e5a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1664 } 1664 }
1665 sbi->s_jquota_fmt = m->mount_opt; 1665 sbi->s_jquota_fmt = m->mount_opt;
1666#endif 1666#endif
1667#ifndef CONFIG_FS_DAX
1668 } else if (token == Opt_dax) { 1667 } else if (token == Opt_dax) {
1668#ifdef CONFIG_FS_DAX
1669 ext4_msg(sb, KERN_WARNING,
1670 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1671 sbi->s_mount_opt |= m->mount_opt;
1672#else
1669 ext4_msg(sb, KERN_INFO, "dax option not supported"); 1673 ext4_msg(sb, KERN_INFO, "dax option not supported");
1670 return -1; 1674 return -1;
1671#endif 1675#endif
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4afc4d9d2e41..8b2127ffb226 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -610,9 +610,9 @@ parse_record:
610 int status = fat_parse_long(inode, &cpos, &bh, &de, 610 int status = fat_parse_long(inode, &cpos, &bh, &de,
611 &unicode, &nr_slots); 611 &unicode, &nr_slots);
612 if (status < 0) { 612 if (status < 0) {
613 ctx->pos = cpos; 613 bh = NULL;
614 ret = status; 614 ret = status;
615 goto out; 615 goto end_of_dir;
616 } else if (status == PARSE_INVALID) 616 } else if (status == PARSE_INVALID)
617 goto record_end; 617 goto record_end;
618 else if (status == PARSE_NOT_LONGNAME) 618 else if (status == PARSE_NOT_LONGNAME)
@@ -654,8 +654,9 @@ parse_record:
654 fill_len = short_len; 654 fill_len = short_len;
655 655
656start_filldir: 656start_filldir:
657 if (!fake_offset) 657 ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
658 ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); 658 if (fake_offset && ctx->pos < 2)
659 ctx->pos = 2;
659 660
660 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { 661 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
661 if (!dir_emit_dot(file, ctx)) 662 if (!dir_emit_dot(file, ctx))
@@ -681,14 +682,19 @@ record_end:
681 fake_offset = 0; 682 fake_offset = 0;
682 ctx->pos = cpos; 683 ctx->pos = cpos;
683 goto get_new; 684 goto get_new;
685
684end_of_dir: 686end_of_dir:
685 ctx->pos = cpos; 687 if (fake_offset && cpos < 2)
688 ctx->pos = 2;
689 else
690 ctx->pos = cpos;
686fill_failed: 691fill_failed:
687 brelse(bh); 692 brelse(bh);
688 if (unicode) 693 if (unicode)
689 __putname(unicode); 694 __putname(unicode);
690out: 695out:
691 mutex_unlock(&sbi->s_lock); 696 mutex_unlock(&sbi->s_lock);
697
692 return ret; 698 return ret;
693} 699}
694 700
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316adb968b65..de4bdfac0cec 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page)
332 * truncation is indicated by end of range being LLONG_MAX 332 * truncation is indicated by end of range being LLONG_MAX
333 * In this case, we first scan the range and release found pages. 333 * In this case, we first scan the range and release found pages.
334 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 334 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
335 * maps and global counts. 335 * maps and global counts. Page faults can not race with truncation
336 * in this routine. hugetlb_no_page() prevents page faults in the
337 * truncated range. It checks i_size before allocation, and again after
338 * with the page table lock for the page held. The same lock must be
339 * acquired to unmap a page.
336 * hole punch is indicated if end is not LLONG_MAX 340 * hole punch is indicated if end is not LLONG_MAX
337 * In the hole punch case we scan the range and release found pages. 341 * In the hole punch case we scan the range and release found pages.
338 * Only when releasing a page is the associated region/reserv map 342 * Only when releasing a page is the associated region/reserv map
339 * deleted. The region/reserv map for ranges without associated 343 * deleted. The region/reserv map for ranges without associated
340 * pages are not modified. 344 * pages are not modified. Page faults can race with hole punch.
345 * This is indicated if we find a mapped page.
341 * Note: If the passed end of range value is beyond the end of file, but 346 * Note: If the passed end of range value is beyond the end of file, but
342 * not LLONG_MAX this routine still performs a hole punch operation. 347 * not LLONG_MAX this routine still performs a hole punch operation.
343 */ 348 */
@@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
361 next = start; 366 next = start;
362 while (next < end) { 367 while (next < end) {
363 /* 368 /*
364 * Make sure to never grab more pages that we 369 * Don't grab more pages than the number left in the range.
365 * might possibly need.
366 */ 370 */
367 if (end - next < lookup_nr) 371 if (end - next < lookup_nr)
368 lookup_nr = end - next; 372 lookup_nr = end - next;
369 373
370 /* 374 /*
371 * This pagevec_lookup() may return pages past 'end', 375 * When no more pages are found, we are done.
372 * so we must check for page->index > end.
373 */ 376 */
374 if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) { 377 if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
375 if (next == start) 378 break;
376 break;
377 next = start;
378 continue;
379 }
380 379
381 for (i = 0; i < pagevec_count(&pvec); ++i) { 380 for (i = 0; i < pagevec_count(&pvec); ++i) {
382 struct page *page = pvec.pages[i]; 381 struct page *page = pvec.pages[i];
383 u32 hash; 382 u32 hash;
384 383
384 /*
385 * The page (index) could be beyond end. This is
 386 * only possible in the hole punch case, as end is
 387 * the max page offset in the truncate case.
388 */
389 next = page->index;
390 if (next >= end)
391 break;
392
385 hash = hugetlb_fault_mutex_hash(h, current->mm, 393 hash = hugetlb_fault_mutex_hash(h, current->mm,
386 &pseudo_vma, 394 &pseudo_vma,
387 mapping, next, 0); 395 mapping, next, 0);
388 mutex_lock(&hugetlb_fault_mutex_table[hash]); 396 mutex_lock(&hugetlb_fault_mutex_table[hash]);
389 397
390 lock_page(page); 398 lock_page(page);
391 if (page->index >= end) { 399 if (likely(!page_mapped(page))) {
392 unlock_page(page);
393 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
394 next = end; /* we are done */
395 break;
396 }
397
398 /*
399 * If page is mapped, it was faulted in after being
400 * unmapped. Do nothing in this race case. In the
401 * normal case page is not mapped.
402 */
403 if (!page_mapped(page)) {
404 bool rsv_on_error = !PagePrivate(page); 400 bool rsv_on_error = !PagePrivate(page);
405 /* 401 /*
406 * We must free the huge page and remove 402 * We must free the huge page and remove
@@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
421 hugetlb_fix_reserve_counts( 417 hugetlb_fix_reserve_counts(
422 inode, rsv_on_error); 418 inode, rsv_on_error);
423 } 419 }
420 } else {
421 /*
422 * If page is mapped, it was faulted in after
423 * being unmapped. It indicates a race between
424 * hole punch and page fault. Do nothing in
425 * this case. Getting here in a truncate
426 * operation is a bug.
427 */
428 BUG_ON(truncate_op);
424 } 429 }
425 430
426 if (page->index > next)
427 next = page->index;
428
429 ++next;
430 unlock_page(page); 431 unlock_page(page);
431
432 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 432 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
433 } 433 }
434 ++next;
434 huge_pagevec_release(&pvec); 435 huge_pagevec_release(&pvec);
436 cond_resched();
435 } 437 }
436 438
437 if (truncate_op) 439 if (truncate_op)
@@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
647 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 649 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
648 i_size_write(inode, offset + len); 650 i_size_write(inode, offset + len);
649 inode->i_ctime = CURRENT_TIME; 651 inode->i_ctime = CURRENT_TIME;
650 spin_lock(&inode->i_lock);
651 inode->i_private = NULL;
652 spin_unlock(&inode->i_lock);
653out: 652out:
654 mutex_unlock(&inode->i_mutex); 653 mutex_unlock(&inode->i_mutex);
655 return error; 654 return error;
diff --git a/fs/namei.c b/fs/namei.c
index d84d7c7515fc..0c3974cd3ecd 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1996,7 +1996,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
1996 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 1996 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1997 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; 1997 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
1998 nd->depth = 0; 1998 nd->depth = 0;
1999 nd->total_link_count = 0;
2000 if (flags & LOOKUP_ROOT) { 1999 if (flags & LOOKUP_ROOT) {
2001 struct dentry *root = nd->root.dentry; 2000 struct dentry *root = nd->root.dentry;
2002 struct inode *inode = root->d_inode; 2001 struct inode *inode = root->d_inode;
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 79b113048eac..0a3f9b594602 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
525 switch (rqdata.cmd) { 525 switch (rqdata.cmd) {
526 case NCP_LOCK_EX: 526 case NCP_LOCK_EX:
527 case NCP_LOCK_SH: 527 case NCP_LOCK_SH:
528 if (rqdata.timeout < 0)
529 return -EINVAL;
528 if (rqdata.timeout == 0) 530 if (rqdata.timeout == 0)
529 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; 531 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
530 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) 532 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 646cdac73488..beac58b0e09c 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -78,7 +78,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
78 78
79 p = xdr_inline_decode(xdr, nbytes); 79 p = xdr_inline_decode(xdr, nbytes);
80 if (unlikely(p == NULL)) 80 if (unlikely(p == NULL))
81 printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n"); 81 printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
82 "or truncated request.\n");
82 return p; 83 return p;
83} 84}
84 85
@@ -889,6 +890,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
889 struct cb_compound_hdr_arg hdr_arg = { 0 }; 890 struct cb_compound_hdr_arg hdr_arg = { 0 };
890 struct cb_compound_hdr_res hdr_res = { NULL }; 891 struct cb_compound_hdr_res hdr_res = { NULL };
891 struct xdr_stream xdr_in, xdr_out; 892 struct xdr_stream xdr_in, xdr_out;
893 struct xdr_buf *rq_arg = &rqstp->rq_arg;
892 __be32 *p, status; 894 __be32 *p, status;
893 struct cb_process_state cps = { 895 struct cb_process_state cps = {
894 .drc_status = 0, 896 .drc_status = 0,
@@ -900,7 +902,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
900 902
901 dprintk("%s: start\n", __func__); 903 dprintk("%s: start\n", __func__);
902 904
903 xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); 905 rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
906 xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
904 907
905 p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); 908 p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
906 xdr_init_encode(&xdr_out, &rqstp->rq_res, p); 909 xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 326d9e10d833..31b0a52223a7 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -618,7 +618,10 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
618 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); 618 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
619 nfs_vmtruncate(inode, attr->ia_size); 619 nfs_vmtruncate(inode, attr->ia_size);
620 } 620 }
621 nfs_update_inode(inode, fattr); 621 if (fattr->valid)
622 nfs_update_inode(inode, fattr);
623 else
624 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
622 spin_unlock(&inode->i_lock); 625 spin_unlock(&inode->i_lock);
623} 626}
624EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); 627EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
@@ -1824,7 +1827,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1824 if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) 1827 if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
1825 nfsi->attr_gencount = fattr->gencount; 1828 nfsi->attr_gencount = fattr->gencount;
1826 } 1829 }
1827 invalid &= ~NFS_INO_INVALID_ATTR; 1830
1831 /* Don't declare attrcache up to date if there were no attrs! */
1832 if (fattr->valid != 0)
1833 invalid &= ~NFS_INO_INVALID_ATTR;
1834
1828 /* Don't invalidate the data if we were to blame */ 1835 /* Don't invalidate the data if we were to blame */
1829 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) 1836 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
1830 || S_ISLNK(inode->i_mode))) 1837 || S_ISLNK(inode->i_mode)))
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 3e92a3cde15d..6b1ce9825430 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -14,7 +14,7 @@
14#include "pnfs.h" 14#include "pnfs.h"
15#include "internal.h" 15#include "internal.h"
16 16
17#define NFSDBG_FACILITY NFSDBG_PNFS 17#define NFSDBG_FACILITY NFSDBG_PROC
18 18
19static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, 19static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
20 fmode_t fmode) 20 fmode_t fmode)
@@ -284,6 +284,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
284 .dst_fh = NFS_FH(dst_inode), 284 .dst_fh = NFS_FH(dst_inode),
285 .src_offset = src_offset, 285 .src_offset = src_offset,
286 .dst_offset = dst_offset, 286 .dst_offset = dst_offset,
287 .count = count,
287 .dst_bitmask = server->cache_consistency_bitmask, 288 .dst_bitmask = server->cache_consistency_bitmask,
288 }; 289 };
289 struct nfs42_clone_res res = { 290 struct nfs42_clone_res res = {
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 223bedda64ae..10410e8b5853 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
33 return ret; 33 return ret;
34 idr_preload(GFP_KERNEL); 34 idr_preload(GFP_KERNEL);
35 spin_lock(&nn->nfs_client_lock); 35 spin_lock(&nn->nfs_client_lock);
36 ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT); 36 ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
37 if (ret >= 0) 37 if (ret >= 0)
38 clp->cl_cb_ident = ret; 38 clp->cl_cb_ident = ret;
39 spin_unlock(&nn->nfs_client_lock); 39 spin_unlock(&nn->nfs_client_lock);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 4aa571956cd6..db9b5fea5b3e 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -7,6 +7,7 @@
7#include <linux/file.h> 7#include <linux/file.h>
8#include <linux/falloc.h> 8#include <linux/falloc.h>
9#include <linux/nfs_fs.h> 9#include <linux/nfs_fs.h>
10#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
10#include "delegation.h" 11#include "delegation.h"
11#include "internal.h" 12#include "internal.h"
12#include "iostat.h" 13#include "iostat.h"
@@ -203,6 +204,7 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
203 struct fd src_file; 204 struct fd src_file;
204 struct inode *src_inode; 205 struct inode *src_inode;
205 unsigned int bs = server->clone_blksize; 206 unsigned int bs = server->clone_blksize;
207 bool same_inode = false;
206 int ret; 208 int ret;
207 209
208 /* dst file must be opened for writing */ 210 /* dst file must be opened for writing */
@@ -221,10 +223,8 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
221 223
222 src_inode = file_inode(src_file.file); 224 src_inode = file_inode(src_file.file);
223 225
224 /* src and dst must be different files */
225 ret = -EINVAL;
226 if (src_inode == dst_inode) 226 if (src_inode == dst_inode)
227 goto out_fput; 227 same_inode = true;
228 228
229 /* src file must be opened for reading */ 229 /* src file must be opened for reading */
230 if (!(src_file.file->f_mode & FMODE_READ)) 230 if (!(src_file.file->f_mode & FMODE_READ))
@@ -249,8 +249,16 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
249 goto out_fput; 249 goto out_fput;
250 } 250 }
251 251
 252 /* check for overlapping ranges within the same file */
253 if (same_inode) {
254 if (dst_off + count > src_off && dst_off < src_off + count)
255 goto out_fput;
256 }
257
252 /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */ 258 /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
253 if (dst_inode < src_inode) { 259 if (same_inode) {
260 mutex_lock(&src_inode->i_mutex);
261 } else if (dst_inode < src_inode) {
254 mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT); 262 mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT);
255 mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); 263 mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
256 } else { 264 } else {
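The same-inode check added above is a standard half-open interval intersection test. A self-contained userspace check (the function name is invented for illustration):

#include <stdio.h>

/* true iff [dst_off, dst_off + count) intersects [src_off, src_off + count) */
static int clone_ranges_overlap(unsigned long long src_off,
                                unsigned long long dst_off,
                                unsigned long long count)
{
        return dst_off + count > src_off && dst_off < src_off + count;
}

int main(void)
{
        printf("%d\n", clone_ranges_overlap(0, 4096, 4096));   /* 0: adjacent, allowed */
        printf("%d\n", clone_ranges_overlap(0, 2048, 4096));   /* 1: overlap, rejected */
        return 0;
}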
@@ -275,7 +283,9 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
275 truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); 283 truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);
276 284
277out_unlock: 285out_unlock:
278 if (dst_inode < src_inode) { 286 if (same_inode) {
287 mutex_unlock(&src_inode->i_mutex);
288 } else if (dst_inode < src_inode) {
279 mutex_unlock(&src_inode->i_mutex); 289 mutex_unlock(&src_inode->i_mutex);
280 mutex_unlock(&dst_inode->i_mutex); 290 mutex_unlock(&dst_inode->i_mutex);
281 } else { 291 } else {
@@ -291,46 +301,31 @@ out_drop_write:
291 301
292static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) 302static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp)
293{ 303{
294 struct nfs_ioctl_clone_range_args args; 304 struct btrfs_ioctl_clone_range_args args;
295 305
296 if (copy_from_user(&args, argp, sizeof(args))) 306 if (copy_from_user(&args, argp, sizeof(args)))
297 return -EFAULT; 307 return -EFAULT;
298 308
299 return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_off, args.dst_off, args.count); 309 return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_offset,
300} 310 args.dest_offset, args.src_length);
301#else
302static long nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
303 u64 src_off, u64 dst_off, u64 count)
304{
305 return -ENOTTY;
306}
307
308static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp)
309{
310 return -ENOTTY;
311} 311}
312#endif /* CONFIG_NFS_V4_2 */
313 312
314long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 313long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
315{ 314{
316 void __user *argp = (void __user *)arg; 315 void __user *argp = (void __user *)arg;
317 316
318 switch (cmd) { 317 switch (cmd) {
319 case NFS_IOC_CLONE: 318 case BTRFS_IOC_CLONE:
320 return nfs42_ioctl_clone(file, arg, 0, 0, 0); 319 return nfs42_ioctl_clone(file, arg, 0, 0, 0);
321 case NFS_IOC_CLONE_RANGE: 320 case BTRFS_IOC_CLONE_RANGE:
322 return nfs42_ioctl_clone_range(file, argp); 321 return nfs42_ioctl_clone_range(file, argp);
323 } 322 }
324 323
325 return -ENOTTY; 324 return -ENOTTY;
326} 325}
326#endif /* CONFIG_NFS_V4_2 */
327 327
328const struct file_operations nfs4_file_operations = { 328const struct file_operations nfs4_file_operations = {
329#ifdef CONFIG_NFS_V4_2
330 .llseek = nfs4_file_llseek,
331#else
332 .llseek = nfs_file_llseek,
333#endif
334 .read_iter = nfs_file_read, 329 .read_iter = nfs_file_read,
335 .write_iter = nfs_file_write, 330 .write_iter = nfs_file_write,
336 .mmap = nfs_file_mmap, 331 .mmap = nfs_file_mmap,
@@ -342,14 +337,14 @@ const struct file_operations nfs4_file_operations = {
342 .flock = nfs_flock, 337 .flock = nfs_flock,
343 .splice_read = nfs_file_splice_read, 338 .splice_read = nfs_file_splice_read,
344 .splice_write = iter_file_splice_write, 339 .splice_write = iter_file_splice_write,
345#ifdef CONFIG_NFS_V4_2
346 .fallocate = nfs42_fallocate,
347#endif /* CONFIG_NFS_V4_2 */
348 .check_flags = nfs_check_flags, 340 .check_flags = nfs_check_flags,
349 .setlease = simple_nosetlease, 341 .setlease = simple_nosetlease,
350#ifdef CONFIG_COMPAT 342#ifdef CONFIG_NFS_V4_2
343 .llseek = nfs4_file_llseek,
344 .fallocate = nfs42_fallocate,
351 .unlocked_ioctl = nfs4_ioctl, 345 .unlocked_ioctl = nfs4_ioctl,
352#else
353 .compat_ioctl = nfs4_ioctl, 346 .compat_ioctl = nfs4_ioctl,
354#endif /* CONFIG_COMPAT */ 347#else
348 .llseek = nfs_file_llseek,
349#endif
355}; 350};
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 765a03559363..89818036f035 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7866,7 +7866,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7866 spin_unlock(&inode->i_lock); 7866 spin_unlock(&inode->i_lock);
7867 goto out_restart; 7867 goto out_restart;
7868 } 7868 }
7869 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) 7869 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7870 goto out_restart; 7870 goto out_restart;
7871out: 7871out:
7872 dprintk("<-- %s\n", __func__); 7872 dprintk("<-- %s\n", __func__);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index dfed4f5c8fcc..4e4441216804 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3615,6 +3615,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
3615 status = 0; 3615 status = 0;
3616 if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) 3616 if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS)))
3617 goto out; 3617 goto out;
3618 bitmap[0] &= ~FATTR4_WORD0_FS_LOCATIONS;
3618 status = -EIO; 3619 status = -EIO;
 3619 /* Ignore broken servers that return unrequested attrs */ 3620 /* Ignore broken servers that return unrequested attrs */
3620 if (unlikely(res == NULL)) 3621 if (unlikely(res == NULL))
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 93496c059837..5a8ae2125b50 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo,
872 872
873 dprintk("--> %s\n", __func__); 873 dprintk("--> %s\n", __func__);
874 874
875 lgp = kzalloc(sizeof(*lgp), gfp_flags); 875 /*
876 if (lgp == NULL) 876 * Synchronously retrieve layout information from server and
877 return NULL; 877 * store in lseg. If we race with a concurrent seqid morphing
878 * op, then re-send the LAYOUTGET.
879 */
880 do {
881 lgp = kzalloc(sizeof(*lgp), gfp_flags);
882 if (lgp == NULL)
883 return NULL;
884
885 i_size = i_size_read(ino);
886
887 lgp->args.minlength = PAGE_CACHE_SIZE;
888 if (lgp->args.minlength > range->length)
889 lgp->args.minlength = range->length;
890 if (range->iomode == IOMODE_READ) {
891 if (range->offset >= i_size)
892 lgp->args.minlength = 0;
893 else if (i_size - range->offset < lgp->args.minlength)
894 lgp->args.minlength = i_size - range->offset;
895 }
896 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
897 lgp->args.range = *range;
898 lgp->args.type = server->pnfs_curr_ld->id;
899 lgp->args.inode = ino;
900 lgp->args.ctx = get_nfs_open_context(ctx);
901 lgp->gfp_flags = gfp_flags;
902 lgp->cred = lo->plh_lc_cred;
878 903
879 i_size = i_size_read(ino); 904 lseg = nfs4_proc_layoutget(lgp, gfp_flags);
905 } while (lseg == ERR_PTR(-EAGAIN));
880 906
881 lgp->args.minlength = PAGE_CACHE_SIZE;
882 if (lgp->args.minlength > range->length)
883 lgp->args.minlength = range->length;
884 if (range->iomode == IOMODE_READ) {
885 if (range->offset >= i_size)
886 lgp->args.minlength = 0;
887 else if (i_size - range->offset < lgp->args.minlength)
888 lgp->args.minlength = i_size - range->offset;
889 }
890 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
891 lgp->args.range = *range;
892 lgp->args.type = server->pnfs_curr_ld->id;
893 lgp->args.inode = ino;
894 lgp->args.ctx = get_nfs_open_context(ctx);
895 lgp->gfp_flags = gfp_flags;
896 lgp->cred = lo->plh_lc_cred;
897
898 /* Synchronously retrieve layout information from server and
899 * store in lseg.
900 */
901 lseg = nfs4_proc_layoutget(lgp, gfp_flags);
902 if (IS_ERR(lseg)) { 907 if (IS_ERR(lseg)) {
903 switch (PTR_ERR(lseg)) { 908 switch (PTR_ERR(lseg)) {
904 case -ENOMEM: 909 case -ENOMEM:
@@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
1687 /* existing state ID, make sure the sequence number matches. */ 1692 /* existing state ID, make sure the sequence number matches. */
1688 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { 1693 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1689 dprintk("%s forget reply due to sequence\n", __func__); 1694 dprintk("%s forget reply due to sequence\n", __func__);
1695 status = -EAGAIN;
1690 goto out_forget_reply; 1696 goto out_forget_reply;
1691 } 1697 }
1692 pnfs_set_layout_stateid(lo, &res->stateid, false); 1698 pnfs_set_layout_stateid(lo, &res->stateid, false);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3b48ac25d8a7..a03f6f433075 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir,
372 mlog_errno(status); 372 mlog_errno(status);
373 goto leave; 373 goto leave;
374 } 374 }
 375 /* update inode->i_mode now that it has been masked with the umask. */
376 inode->i_mode = mode;
375 377
376 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, 378 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
377 S_ISDIR(mode), 379 S_ISDIR(mode),
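
The ocfs2 fix stores the already-masked mode back into inode->i_mode before the transaction starts, so the new inode carries the permissions the VFS computed after applying the process umask. The masking itself is plain bit arithmetic; a user-space illustration:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    mode_t requested = 0666;           /* mode as passed to mknod()/creat() */
    mode_t mask = umask(0);            /* read the current umask... */
    umask(mask);                       /* ...and immediately restore it */

    /* what the filesystem must store: the requested bits minus the umask */
    mode_t effective = requested & ~mask;
    printf("requested %04o, umask %04o -> i_mode %04o\n",
           (unsigned)requested, (unsigned)mask, (unsigned)effective);
    return 0;
}
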
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 871fcb67be97..0a8983492d91 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -195,8 +195,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
195 195
196static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, 196static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
197 struct dentry *dentry, struct path *lowerpath, 197 struct dentry *dentry, struct path *lowerpath,
198 struct kstat *stat, struct iattr *attr, 198 struct kstat *stat, const char *link)
199 const char *link)
200{ 199{
201 struct inode *wdir = workdir->d_inode; 200 struct inode *wdir = workdir->d_inode;
202 struct inode *udir = upperdir->d_inode; 201 struct inode *udir = upperdir->d_inode;
@@ -240,8 +239,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
240 239
241 mutex_lock(&newdentry->d_inode->i_mutex); 240 mutex_lock(&newdentry->d_inode->i_mutex);
242 err = ovl_set_attr(newdentry, stat); 241 err = ovl_set_attr(newdentry, stat);
243 if (!err && attr)
244 err = notify_change(newdentry, attr, NULL);
245 mutex_unlock(&newdentry->d_inode->i_mutex); 242 mutex_unlock(&newdentry->d_inode->i_mutex);
246 if (err) 243 if (err)
247 goto out_cleanup; 244 goto out_cleanup;
@@ -286,8 +283,7 @@ out_cleanup:
286 * that point the file will have already been copied up anyway. 283 * that point the file will have already been copied up anyway.
287 */ 284 */
288int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, 285int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
289 struct path *lowerpath, struct kstat *stat, 286 struct path *lowerpath, struct kstat *stat)
290 struct iattr *attr)
291{ 287{
292 struct dentry *workdir = ovl_workdir(dentry); 288 struct dentry *workdir = ovl_workdir(dentry);
293 int err; 289 int err;
@@ -345,26 +341,19 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
345 } 341 }
346 upperdentry = ovl_dentry_upper(dentry); 342 upperdentry = ovl_dentry_upper(dentry);
347 if (upperdentry) { 343 if (upperdentry) {
348 unlock_rename(workdir, upperdir); 344 /* Raced with another copy-up? Nothing to do, then... */
349 err = 0; 345 err = 0;
350 /* Raced with another copy-up? Do the setattr here */ 346 goto out_unlock;
351 if (attr) {
352 mutex_lock(&upperdentry->d_inode->i_mutex);
353 err = notify_change(upperdentry, attr, NULL);
354 mutex_unlock(&upperdentry->d_inode->i_mutex);
355 }
356 goto out_put_cred;
357 } 347 }
358 348
359 err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, 349 err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
360 stat, attr, link); 350 stat, link);
361 if (!err) { 351 if (!err) {
362 /* Restore timestamps on parent (best effort) */ 352 /* Restore timestamps on parent (best effort) */
363 ovl_set_timestamps(upperdir, &pstat); 353 ovl_set_timestamps(upperdir, &pstat);
364 } 354 }
365out_unlock: 355out_unlock:
366 unlock_rename(workdir, upperdir); 356 unlock_rename(workdir, upperdir);
367out_put_cred:
368 revert_creds(old_cred); 357 revert_creds(old_cred);
369 put_cred(override_cred); 358 put_cred(override_cred);
370 359
@@ -406,7 +395,7 @@ int ovl_copy_up(struct dentry *dentry)
406 ovl_path_lower(next, &lowerpath); 395 ovl_path_lower(next, &lowerpath);
407 err = vfs_getattr(&lowerpath, &stat); 396 err = vfs_getattr(&lowerpath, &stat);
408 if (!err) 397 if (!err)
409 err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL); 398 err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
410 399
411 dput(parent); 400 dput(parent);
412 dput(next); 401 dput(next);
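
With the setattr work moved out of copy-up, losing the copy-up race becomes trivially harmless: the function checks for an existing upper dentry under the same lock it holds for the copy, and simply returns success if somebody else got there first. A generic user-space sketch of that check-under-lock, bail-if-done idiom (pthread-based, names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *upper;                    /* NULL until someone "copies up" */

static int copy_up_one(const char *data)
{
    int err = 0;

    pthread_mutex_lock(&lock);
    if (upper)                         /* raced with another copy-up? */
        goto out_unlock;               /* nothing to do, then... */
    upper = strdup(data);              /* the actual copy-up work */
    if (!upper)
        err = -1;
out_unlock:
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    copy_up_one("lower contents");
    copy_up_one("lower contents");     /* second caller is a clean no-op */
    printf("upper = \"%s\"\n", upper);
    free(upper);
    return 0;
}
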
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index ec0c2a050043..4060ffde8722 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -12,8 +12,7 @@
12#include <linux/xattr.h> 12#include <linux/xattr.h>
13#include "overlayfs.h" 13#include "overlayfs.h"
14 14
15static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, 15static int ovl_copy_up_truncate(struct dentry *dentry)
16 bool no_data)
17{ 16{
18 int err; 17 int err;
19 struct dentry *parent; 18 struct dentry *parent;
@@ -30,10 +29,8 @@ static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
30 if (err) 29 if (err)
31 goto out_dput_parent; 30 goto out_dput_parent;
32 31
33 if (no_data) 32 stat.size = 0;
34 stat.size = 0; 33 err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
35
36 err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr);
37 34
38out_dput_parent: 35out_dput_parent:
39 dput(parent); 36 dput(parent);
@@ -49,13 +46,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
49 if (err) 46 if (err)
50 goto out; 47 goto out;
51 48
52 upperdentry = ovl_dentry_upper(dentry); 49 err = ovl_copy_up(dentry);
53 if (upperdentry) { 50 if (!err) {
51 upperdentry = ovl_dentry_upper(dentry);
52
54 mutex_lock(&upperdentry->d_inode->i_mutex); 53 mutex_lock(&upperdentry->d_inode->i_mutex);
55 err = notify_change(upperdentry, attr, NULL); 54 err = notify_change(upperdentry, attr, NULL);
56 mutex_unlock(&upperdentry->d_inode->i_mutex); 55 mutex_unlock(&upperdentry->d_inode->i_mutex);
57 } else {
58 err = ovl_copy_up_last(dentry, attr, false);
59 } 56 }
60 ovl_drop_write(dentry); 57 ovl_drop_write(dentry);
61out: 58out:
@@ -353,7 +350,7 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
353 return ERR_PTR(err); 350 return ERR_PTR(err);
354 351
355 if (file_flags & O_TRUNC) 352 if (file_flags & O_TRUNC)
356 err = ovl_copy_up_last(dentry, NULL, true); 353 err = ovl_copy_up_truncate(dentry);
357 else 354 else
358 err = ovl_copy_up(dentry); 355 err = ovl_copy_up(dentry);
359 ovl_drop_write(dentry); 356 ovl_drop_write(dentry);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index ea5a40b06e3a..e17154aeaae4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -194,7 +194,6 @@ void ovl_cleanup(struct inode *dir, struct dentry *dentry);
194/* copy_up.c */ 194/* copy_up.c */
195int ovl_copy_up(struct dentry *dentry); 195int ovl_copy_up(struct dentry *dentry);
196int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, 196int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
197 struct path *lowerpath, struct kstat *stat, 197 struct path *lowerpath, struct kstat *stat);
198 struct iattr *attr);
199int ovl_copy_xattr(struct dentry *old, struct dentry *new); 198int ovl_copy_xattr(struct dentry *old, struct dentry *new);
200int ovl_set_attr(struct dentry *upper, struct kstat *stat); 199int ovl_set_attr(struct dentry *upper, struct kstat *stat);
diff --git a/fs/splice.c b/fs/splice.c
index 801c21cd77fe..4cf700d50b40 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
809 */ 809 */
810static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) 810static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
811{ 811{
812 /*
 813 * Check for a signal early to keep the process killable even when
 814 * buffers are always available
815 */
816 if (signal_pending(current))
817 return -ERESTARTSYS;
818
812 while (!pipe->nrbufs) { 819 while (!pipe->nrbufs) {
813 if (!pipe->writers) 820 if (!pipe->writers)
814 return 0; 821 return 0;
@@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
884 891
885 splice_from_pipe_begin(sd); 892 splice_from_pipe_begin(sd);
886 do { 893 do {
894 cond_resched();
887 ret = splice_from_pipe_next(pipe, sd); 895 ret = splice_from_pipe_next(pipe, sd);
888 if (ret > 0) 896 if (ret > 0)
889 ret = splice_from_pipe_feed(pipe, sd, actor); 897 ret = splice_from_pipe_feed(pipe, sd, actor);
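
The two splice.c additions address separate starvation cases: the early signal check makes the process killable even when the pipe always has buffers (so the sleeping while-loop body is never reached), and the cond_resched() keeps long uninterrupted feeds preemptible. A user-space analogue of checking for a pending signal before doing more work:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t interrupted;

static void on_sigint(int sig) { (void)sig; interrupted = 1; }

/* Analogue of the patched splice_from_pipe_next(): check for a signal
 * first, even though a "buffer" is always available and we never block. */
static int next_chunk(void)
{
    if (interrupted)
        return -1;                     /* the kernel returns -ERESTARTSYS here */
    return 1;                          /* pretend data is always ready */
}

int main(void)
{
    signal(SIGINT, on_sigint);
    while (next_chunk() > 0)
        usleep(1000);                  /* stand-in for feeding one chunk */
    puts("signal seen, loop exits promptly");
    return 0;
}
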
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 590ad9206e3f..02fa1dcc5969 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
162 inode->i_fop = &sysv_dir_operations; 162 inode->i_fop = &sysv_dir_operations;
163 inode->i_mapping->a_ops = &sysv_aops; 163 inode->i_mapping->a_ops = &sysv_aops;
164 } else if (S_ISLNK(inode->i_mode)) { 164 } else if (S_ISLNK(inode->i_mode)) {
165 if (inode->i_blocks) { 165 inode->i_op = &sysv_symlink_inode_operations;
166 inode->i_op = &sysv_symlink_inode_operations; 166 inode->i_mapping->a_ops = &sysv_aops;
167 inode->i_mapping->a_ops = &sysv_aops;
168 } else {
169 inode->i_op = &simple_symlink_inode_operations;
170 inode->i_link = (char *)SYSV_I(inode)->i_data;
171 nd_terminate_link(inode->i_link, inode->i_size,
172 sizeof(SYSV_I(inode)->i_data) - 1);
173 }
174 } else 167 } else
175 init_special_inode(inode, inode->i_mode, rdev); 168 init_special_inode(inode, inode->i_mode, rdev);
176} 169}
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 0b921ae06cd8..5531d7bbe851 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -309,6 +309,11 @@ struct drm_file {
309 unsigned universal_planes:1; 309 unsigned universal_planes:1;
310 /* true if client understands atomic properties */ 310 /* true if client understands atomic properties */
311 unsigned atomic:1; 311 unsigned atomic:1;
312 /*
313 * This client is allowed to gain master privileges for @master.
314 * Protected by struct drm_device::master_mutex.
315 */
316 unsigned allowed_master:1;
312 317
313 struct pid *pid; 318 struct pid *pid;
314 kuid_t uid; 319 kuid_t uid;
@@ -344,6 +349,8 @@ struct drm_file {
344 struct list_head event_list; 349 struct list_head event_list;
345 int event_space; 350 int event_space;
346 351
352 struct mutex event_read_lock;
353
347 struct drm_prime_file_private prime; 354 struct drm_prime_file_private prime;
348}; 355};
349 356
@@ -580,6 +587,13 @@ struct drm_driver {
580 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); 587 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
581 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); 588 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
582 589
590 /**
591 * Hook for allocating the GEM object struct, for use by core
592 * helpers.
593 */
594 struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
595 size_t size);
596
583 /* prime: */ 597 /* prime: */
584 /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ 598 /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
585 int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, 599 int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
@@ -910,6 +924,7 @@ extern int drm_open(struct inode *inode, struct file *filp);
910extern ssize_t drm_read(struct file *filp, char __user *buffer, 924extern ssize_t drm_read(struct file *filp, char __user *buffer,
911 size_t count, loff_t *offset); 925 size_t count, loff_t *offset);
912extern int drm_release(struct inode *inode, struct file *filp); 926extern int drm_release(struct inode *inode, struct file *filp);
927extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
913 928
914 /* Mapping support (drm_vm.h) */ 929 /* Mapping support (drm_vm.h) */
915extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 930extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -947,6 +962,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
947 struct drm_pending_vblank_event *e); 962 struct drm_pending_vblank_event *e);
948extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 963extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
949 struct drm_pending_vblank_event *e); 964 struct drm_pending_vblank_event *e);
965extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
966 struct drm_pending_vblank_event *e);
967extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
968 struct drm_pending_vblank_event *e);
950extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); 969extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
951extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 970extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
952extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); 971extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
@@ -1111,4 +1130,7 @@ static __inline__ bool drm_can_sleep(void)
1111 return true; 1130 return true;
1112} 1131}
1113 1132
1133/* helper for handling conditionals in various for_each macros */
1134#define for_each_if(condition) if (!(condition)) {} else
1135
1114#endif 1136#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index e67aeac2aee0..d8576ac55693 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
136 136
137void drm_atomic_legacy_backoff(struct drm_atomic_state *state); 137void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
138 138
139void
140drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
141
139int __must_check drm_atomic_check_only(struct drm_atomic_state *state); 142int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
140int __must_check drm_atomic_commit(struct drm_atomic_state *state); 143int __must_check drm_atomic_commit(struct drm_atomic_state *state);
141int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); 144int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
@@ -146,7 +149,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
146 ((connector) = (state)->connectors[__i], \ 149 ((connector) = (state)->connectors[__i], \
147 (connector_state) = (state)->connector_states[__i], 1); \ 150 (connector_state) = (state)->connector_states[__i], 1); \
148 (__i)++) \ 151 (__i)++) \
149 if (connector) 152 for_each_if (connector)
150 153
151#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ 154#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
152 for ((__i) = 0; \ 155 for ((__i) = 0; \
@@ -154,7 +157,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
154 ((crtc) = (state)->crtcs[__i], \ 157 ((crtc) = (state)->crtcs[__i], \
155 (crtc_state) = (state)->crtc_states[__i], 1); \ 158 (crtc_state) = (state)->crtc_states[__i], 1); \
156 (__i)++) \ 159 (__i)++) \
157 if (crtc_state) 160 for_each_if (crtc_state)
158 161
159#define for_each_plane_in_state(state, plane, plane_state, __i) \ 162#define for_each_plane_in_state(state, plane, plane_state, __i) \
160 for ((__i) = 0; \ 163 for ((__i) = 0; \
@@ -162,7 +165,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
162 ((plane) = (state)->planes[__i], \ 165 ((plane) = (state)->planes[__i], \
163 (plane_state) = (state)->plane_states[__i], 1); \ 166 (plane_state) = (state)->plane_states[__i], 1); \
164 (__i)++) \ 167 (__i)++) \
165 if (plane_state) 168 for_each_if (plane_state)
166static inline bool 169static inline bool
167drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) 170drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
168{ 171{
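
The for_each_if() macro added in drmP.h, and adopted by the three for_each_*_in_state iterators above, exists to defuse the dangling-else hazard: a bare trailing if inside an iteration macro would silently capture an else written by the caller. A stand-alone demonstration (the toy iterator and data are made up for illustration):

#include <stdio.h>

#define for_each_if(condition) if (!(condition)) {} else

/* Toy iterator in the style of for_each_plane_in_state(): walk an array,
 * skipping NULL slots. With a bare `if (p)` here, the caller's `else`
 * below would bind to the macro's if instead of the caller's own if. */
#define for_each_name(arr, n, p) \
    for (int __i = 0; __i < (n) && ((p) = (arr)[__i], 1); __i++) \
        for_each_if (p)

int main(void)
{
    const char *names[] = { "plane0", NULL, "plane1" };
    const char *p;
    int enabled = 1;

    if (enabled)
        for_each_name(names, 3, p)
            printf("visit %s\n", p);
    else
        puts("disabled");              /* correctly binds to if (enabled) */
    return 0;
}
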
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 8cba54a2a0a0..a286cce98720 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -62,6 +62,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
62void drm_atomic_helper_cleanup_planes(struct drm_device *dev, 62void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
63 struct drm_atomic_state *old_state); 63 struct drm_atomic_state *old_state);
64void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); 64void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
65void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
66 bool atomic);
65 67
66void drm_atomic_helper_swap_state(struct drm_device *dev, 68void drm_atomic_helper_swap_state(struct drm_device *dev,
67 struct drm_atomic_state *state); 69 struct drm_atomic_state *state);
@@ -81,6 +83,12 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set);
81int __drm_atomic_helper_set_config(struct drm_mode_set *set, 83int __drm_atomic_helper_set_config(struct drm_mode_set *set,
82 struct drm_atomic_state *state); 84 struct drm_atomic_state *state);
83 85
86int drm_atomic_helper_disable_all(struct drm_device *dev,
87 struct drm_modeset_acquire_ctx *ctx);
88struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
89int drm_atomic_helper_resume(struct drm_device *dev,
90 struct drm_atomic_state *state);
91
84int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, 92int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
85 struct drm_property *property, 93 struct drm_property *property,
86 uint64_t val); 94 uint64_t val);
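
The new drm_atomic_helper_suspend()/drm_atomic_helper_resume() pair captures the complete atomic state on suspend and replays it on resume. A sketch, not from this patch, of how a driver's PM callbacks might use them (the foo_* names and private structure are hypothetical; error handling is trimmed):

/* Hypothetical driver glue for the suspend/resume helpers declared above. */
#include <linux/pm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

struct foo_private {
    struct drm_device *drm;
    struct drm_atomic_state *pm_state; /* state saved across suspend */
};

static int foo_pm_suspend(struct device *dev)
{
    struct foo_private *priv = dev_get_drvdata(dev);
    struct drm_atomic_state *state;

    state = drm_atomic_helper_suspend(priv->drm); /* disables all outputs */
    if (IS_ERR(state))
        return PTR_ERR(state);
    priv->pm_state = state;
    return 0;
}

static int foo_pm_resume(struct device *dev)
{
    struct foo_private *priv = dev_get_drvdata(dev);

    /* re-commits the configuration captured at suspend time */
    return drm_atomic_helper_resume(priv->drm, priv->pm_state);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);
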
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3f0c6909dda1..c2f98ba2bb98 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -85,7 +85,11 @@ static inline uint64_t I642U64(int64_t val)
85 return (uint64_t)*((uint64_t *)&val); 85 return (uint64_t)*((uint64_t *)&val);
86} 86}
87 87
88/* rotation property bits */ 88/*
89 * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
 90 * specified amount in degrees in the counterclockwise direction. DRM_REFLECT_X and
 91 * DRM_REFLECT_Y reflect the image along the specified axis prior to rotation.
92 */
89#define DRM_ROTATE_MASK 0x0f 93#define DRM_ROTATE_MASK 0x0f
90#define DRM_ROTATE_0 0 94#define DRM_ROTATE_0 0
91#define DRM_ROTATE_90 1 95#define DRM_ROTATE_90 1
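
The rewritten comment explains the semantics; the values themselves are bit numbers, so a rotation property value is a mask combining one rotation bit with optional reflection bits. A small stand-alone sketch (DRM_ROTATE_180 and DRM_REFLECT_X are assumed to continue the numbering, they are not visible in this hunk):

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define DRM_ROTATE_MASK 0x0f
#define DRM_ROTATE_0    0
#define DRM_ROTATE_90   1
#define DRM_ROTATE_180  2              /* assumed: continues the numbering */
#define DRM_REFLECT_X   4              /* assumed */

int main(void)
{
    /* reflect along X, then rotate 90 degrees counterclockwise */
    unsigned int rotation = BIT(DRM_ROTATE_90) | BIT(DRM_REFLECT_X);

    printf("property value 0x%02x, rotation bits 0x%02x, reflect-x %s\n",
           rotation, rotation & DRM_ROTATE_MASK,
           (rotation & BIT(DRM_REFLECT_X)) ? "yes" : "no");
    return 0;
}
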
@@ -158,23 +162,55 @@ struct drm_tile_group {
158 u8 group_data[8]; 162 u8 group_data[8];
159}; 163};
160 164
165/**
166 * struct drm_framebuffer_funcs - framebuffer hooks
167 */
161struct drm_framebuffer_funcs { 168struct drm_framebuffer_funcs {
162 /* note: use drm_framebuffer_remove() */ 169 /**
170 * @destroy:
171 *
172 * Clean up framebuffer resources, specifically also unreference the
173 * backing storage. The core guarantees to call this function for every
174 * framebuffer successfully created by ->fb_create() in
175 * &drm_mode_config_funcs.
176 */
163 void (*destroy)(struct drm_framebuffer *framebuffer); 177 void (*destroy)(struct drm_framebuffer *framebuffer);
178
179 /**
180 * @create_handle:
181 *
182 * Create a buffer handle in the driver-specific buffer manager (either
183 * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
 184 * the core to implement the GETFB IOCTL, which also returns (for a
 185 * sufficiently privileged user) a native buffer handle. This can
186 * be used for seamless transitions between modesetting clients by
187 * copying the current screen contents to a private buffer and blending
188 * between that and the new contents.
189 *
190 * RETURNS:
191 *
192 * 0 on success or a negative error code on failure.
193 */
164 int (*create_handle)(struct drm_framebuffer *fb, 194 int (*create_handle)(struct drm_framebuffer *fb,
165 struct drm_file *file_priv, 195 struct drm_file *file_priv,
166 unsigned int *handle); 196 unsigned int *handle);
167 /* 197 /**
168 * Optional callback for the dirty fb ioctl. 198 * @dirty:
199 *
200 * Optional callback for the dirty fb IOCTL.
201 *
202 * Userspace can notify the driver via this callback that an area of the
203 * framebuffer has changed and should be flushed to the display
204 * hardware. This can also be used internally, e.g. by the fbdev
205 * emulation, though that's not the case currently.
206 *
207 * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
208 * for more information as all the semantics and arguments have a one to
209 * one mapping on this function.
169 * 210 *
170 * Userspace can notify the driver via this callback 211 * RETURNS:
171 * that a area of the framebuffer has changed and should
172 * be flushed to the display hardware.
173 * 212 *
174 * See documentation in drm_mode.h for the struct 213 * 0 on success or a negative error code on failure.
175 * drm_mode_fb_dirty_cmd for more information as all
176 * the semantics and arguments have a one to one mapping
177 * on this function.
178 */ 214 */
179 int (*dirty)(struct drm_framebuffer *framebuffer, 215 int (*dirty)(struct drm_framebuffer *framebuffer,
180 struct drm_file *file_priv, unsigned flags, 216 struct drm_file *file_priv, unsigned flags,
@@ -250,6 +286,11 @@ struct drm_plane;
250struct drm_bridge; 286struct drm_bridge;
251struct drm_atomic_state; 287struct drm_atomic_state;
252 288
289struct drm_crtc_helper_funcs;
290struct drm_encoder_helper_funcs;
291struct drm_connector_helper_funcs;
292struct drm_plane_helper_funcs;
293
253/** 294/**
254 * struct drm_crtc_state - mutable CRTC state 295 * struct drm_crtc_state - mutable CRTC state
255 * @crtc: backpointer to the CRTC 296 * @crtc: backpointer to the CRTC
@@ -311,23 +352,6 @@ struct drm_crtc_state {
311 352
312/** 353/**
313 * struct drm_crtc_funcs - control CRTCs for a given device 354 * struct drm_crtc_funcs - control CRTCs for a given device
314 * @save: save CRTC state
315 * @restore: restore CRTC state
316 * @reset: reset CRTC after state has been invalidated (e.g. resume)
317 * @cursor_set: setup the cursor
318 * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set
319 * @cursor_move: move the cursor
320 * @gamma_set: specify color ramp for CRTC
321 * @destroy: deinit and free object
322 * @set_property: called when a property is changed
323 * @set_config: apply a new CRTC configuration
324 * @page_flip: initiate a page flip
325 * @atomic_duplicate_state: duplicate the atomic state for this CRTC
326 * @atomic_destroy_state: destroy an atomic state for this CRTC
327 * @atomic_set_property: set a property on an atomic state for this CRTC
328 * (do not call directly, use drm_atomic_crtc_set_property())
329 * @atomic_get_property: get a property on an atomic state for this CRTC
330 * (do not call directly, use drm_atomic_crtc_get_property())
331 * 355 *
332 * The drm_crtc_funcs structure is the central CRTC management structure 356 * The drm_crtc_funcs structure is the central CRTC management structure
333 * in the DRM. Each CRTC controls one or more connectors (note that the name 357 * in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -339,54 +363,316 @@ struct drm_crtc_state {
339 * bus accessors. 363 * bus accessors.
340 */ 364 */
341struct drm_crtc_funcs { 365struct drm_crtc_funcs {
342 /* Save CRTC state */ 366 /**
343 void (*save)(struct drm_crtc *crtc); /* suspend? */ 367 * @reset:
344 /* Restore CRTC state */ 368 *
345 void (*restore)(struct drm_crtc *crtc); /* resume? */ 369 * Reset CRTC hardware and software state to off. This function isn't
346 /* Reset CRTC state */ 370 * called by the core directly, only through drm_mode_config_reset().
371 * It's not a helper hook only for historical reasons.
372 *
373 * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset
374 * atomic state using this hook.
375 */
347 void (*reset)(struct drm_crtc *crtc); 376 void (*reset)(struct drm_crtc *crtc);
348 377
349 /* cursor controls */ 378 /**
379 * @cursor_set:
380 *
381 * Update the cursor image. The cursor position is relative to the CRTC
382 * and can be partially or fully outside of the visible area.
383 *
384 * Note that contrary to all other KMS functions the legacy cursor entry
385 * points don't take a framebuffer object, but instead take directly a
386 * raw buffer object id from the driver's buffer manager (which is
387 * either GEM or TTM for current drivers).
388 *
 389 * This entry point is deprecated; drivers should instead implement
390 * universal plane support and register a proper cursor plane using
391 * drm_crtc_init_with_planes().
392 *
393 * This callback is optional
394 *
395 * RETURNS:
396 *
397 * 0 on success or a negative error code on failure.
398 */
350 int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, 399 int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
351 uint32_t handle, uint32_t width, uint32_t height); 400 uint32_t handle, uint32_t width, uint32_t height);
401
402 /**
403 * @cursor_set2:
404 *
405 * Update the cursor image, including hotspot information. The hotspot
406 * must not affect the cursor position in CRTC coordinates, but is only
407 * meant as a hint for virtualized display hardware to coordinate the
 408 * guest's and host's cursor positions. The cursor hotspot is relative to
409 * the cursor image. Otherwise this works exactly like @cursor_set.
410 *
 411 * This entry point is deprecated; drivers should instead implement
412 * universal plane support and register a proper cursor plane using
413 * drm_crtc_init_with_planes().
414 *
415 * This callback is optional.
416 *
417 * RETURNS:
418 *
419 * 0 on success or a negative error code on failure.
420 */
352 int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, 421 int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
353 uint32_t handle, uint32_t width, uint32_t height, 422 uint32_t handle, uint32_t width, uint32_t height,
354 int32_t hot_x, int32_t hot_y); 423 int32_t hot_x, int32_t hot_y);
424
425 /**
426 * @cursor_move:
427 *
428 * Update the cursor position. The cursor does not need to be visible
429 * when this hook is called.
430 *
 431 * This entry point is deprecated; drivers should instead implement
432 * universal plane support and register a proper cursor plane using
433 * drm_crtc_init_with_planes().
434 *
435 * This callback is optional.
436 *
437 * RETURNS:
438 *
439 * 0 on success or a negative error code on failure.
440 */
355 int (*cursor_move)(struct drm_crtc *crtc, int x, int y); 441 int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
356 442
357 /* Set gamma on the CRTC */ 443 /**
444 * @gamma_set:
445 *
446 * Set gamma on the CRTC.
447 *
448 * This callback is optional.
449 *
450 * NOTE:
451 *
452 * Drivers that support gamma tables and also fbdev emulation through
453 * the provided helper library need to take care to fill out the gamma
 454 * hooks for both. Currently there's a bit of an unfortunate duplication
455 * going on, which should eventually be unified to just one set of
456 * hooks.
457 */
358 void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 458 void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
359 uint32_t start, uint32_t size); 459 uint32_t start, uint32_t size);
360 /* Object destroy routine */ 460
461 /**
462 * @destroy:
463 *
 464 * Clean up CRTC resources. This is only called at driver unload time
465 * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged
466 * in DRM.
467 */
361 void (*destroy)(struct drm_crtc *crtc); 468 void (*destroy)(struct drm_crtc *crtc);
362 469
470 /**
471 * @set_config:
472 *
473 * This is the main legacy entry point to change the modeset state on a
474 * CRTC. All the details of the desired configuration are passed in a
475 * struct &drm_mode_set - see there for details.
476 *
477 * Drivers implementing atomic modeset should use
478 * drm_atomic_helper_set_config() to implement this hook.
479 *
480 * RETURNS:
481 *
482 * 0 on success or a negative error code on failure.
483 */
363 int (*set_config)(struct drm_mode_set *set); 484 int (*set_config)(struct drm_mode_set *set);
364 485
365 /* 486 /**
366 * Flip to the given framebuffer. This implements the page 487 * @page_flip:
367 * flip ioctl described in drm_mode.h, specifically, the 488 *
368 * implementation must return immediately and block all 489 * Legacy entry point to schedule a flip to the given framebuffer.
369 * rendering to the current fb until the flip has completed. 490 *
370 * If userspace set the event flag in the ioctl, the event 491 * Page flipping is a synchronization mechanism that replaces the frame
371 * argument will point to an event to send back when the flip 492 * buffer being scanned out by the CRTC with a new frame buffer during
372 * completes, otherwise it will be NULL. 493 * vertical blanking, avoiding tearing (except when requested otherwise
494 * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application
495 * requests a page flip the DRM core verifies that the new frame buffer
496 * is large enough to be scanned out by the CRTC in the currently
497 * configured mode and then calls the CRTC ->page_flip() operation with a
498 * pointer to the new frame buffer.
499 *
500 * The driver must wait for any pending rendering to the new framebuffer
501 * to complete before executing the flip. It should also wait for any
502 * pending rendering from other drivers if the underlying buffer is a
503 * shared dma-buf.
504 *
505 * An application can request to be notified when the page flip has
506 * completed. The drm core will supply a struct &drm_event in the event
507 * parameter in this case. This can be handled by the
508 * drm_crtc_send_vblank_event() function, which the driver should call on
509 * the provided event upon completion of the flip. Note that if
510 * the driver supports vblank signalling and timestamping the vblank
511 * counters and timestamps must agree with the ones returned from page
512 * flip events. With the current vblank helper infrastructure this can
513 * be achieved by holding a vblank reference while the page flip is
514 * pending, acquired through drm_crtc_vblank_get() and released with
515 * drm_crtc_vblank_put(). Drivers are free to implement their own vblank
516 * counter and timestamp tracking though, e.g. if they have accurate
517 * timestamp registers in hardware.
518 *
519 * FIXME:
520 *
521 * Up to that point drivers need to manage events themselves and can use
 522 * event->base.list freely for that. Specifically they need to ensure
523 * that they don't send out page flip (or vblank) events for which the
524 * corresponding drm file has been closed already. The drm core
525 * unfortunately does not (yet) take care of that. Therefore drivers
526 * currently must clean up and release pending events in their
527 * ->preclose driver function.
528 *
529 * This callback is optional.
530 *
531 * NOTE:
532 *
533 * Very early versions of the KMS ABI mandated that the driver must
534 * block (but not reject) any rendering to the old framebuffer until the
535 * flip operation has completed and the old framebuffer is no longer
536 * visible. This requirement has been lifted, and userspace is instead
 537 * expected to request delivery of an event and to hold off recycling old
 538 * buffers until that event has been received.
539 *
540 * RETURNS:
541 *
542 * 0 on success or a negative error code on failure. Note that if a
543 * ->page_flip() operation is already pending the callback should return
544 * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode
 545 * or just runtime disabled through DPMS or, for atomic drivers, the new
546 * "ACTIVE" state) should result in an -EINVAL error code.
373 */ 547 */
374 int (*page_flip)(struct drm_crtc *crtc, 548 int (*page_flip)(struct drm_crtc *crtc,
375 struct drm_framebuffer *fb, 549 struct drm_framebuffer *fb,
376 struct drm_pending_vblank_event *event, 550 struct drm_pending_vblank_event *event,
377 uint32_t flags); 551 uint32_t flags);
378 552
553 /**
554 * @set_property:
555 *
556 * This is the legacy entry point to update a property attached to the
557 * CRTC.
558 *
559 * Drivers implementing atomic modeset should use
560 * drm_atomic_helper_crtc_set_property() to implement this hook.
561 *
562 * This callback is optional if the driver does not support any legacy
563 * driver-private properties.
564 *
565 * RETURNS:
566 *
567 * 0 on success or a negative error code on failure.
568 */
379 int (*set_property)(struct drm_crtc *crtc, 569 int (*set_property)(struct drm_crtc *crtc,
380 struct drm_property *property, uint64_t val); 570 struct drm_property *property, uint64_t val);
381 571
382 /* atomic update handling */ 572 /**
573 * @atomic_duplicate_state:
574 *
575 * Duplicate the current atomic state for this CRTC and return it.
 576 * The core and helpers guarantee that any atomic state duplicated with
577 * this hook and still owned by the caller (i.e. not transferred to the
578 * driver by calling ->atomic_commit() from struct
579 * &drm_mode_config_funcs) will be cleaned up by calling the
580 * @atomic_destroy_state hook in this structure.
581 *
 582 * Atomic drivers which don't subclass struct &drm_crtc_state should use
583 * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
584 * state structure to extend it with driver-private state should use
585 * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is
586 * duplicated in a consistent fashion across drivers.
587 *
588 * It is an error to call this hook before crtc->state has been
589 * initialized correctly.
590 *
591 * NOTE:
592 *
593 * If the duplicate state references refcounted resources this hook must
594 * acquire a reference for each of them. The driver must release these
595 * references again in @atomic_destroy_state.
596 *
597 * RETURNS:
598 *
599 * Duplicated atomic state or NULL when the allocation failed.
600 */
383 struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); 601 struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
602
603 /**
604 * @atomic_destroy_state:
605 *
606 * Destroy a state duplicated with @atomic_duplicate_state and release
607 * or unreference all resources it references
608 */
384 void (*atomic_destroy_state)(struct drm_crtc *crtc, 609 void (*atomic_destroy_state)(struct drm_crtc *crtc,
385 struct drm_crtc_state *state); 610 struct drm_crtc_state *state);
611
612 /**
613 * @atomic_set_property:
614 *
615 * Decode a driver-private property value and store the decoded value
616 * into the passed-in state structure. Since the atomic core decodes all
617 * standardized properties (even for extensions beyond the core set of
618 * properties which might not be implemented by all drivers) this
619 * requires drivers to subclass the state structure.
620 *
621 * Such driver-private properties should really only be implemented for
622 * truly hardware/vendor specific state. Instead it is preferred to
 623 * standardize the atomic extension and decode the properties used to expose
624 * such an extension in the core.
625 *
626 * Do not call this function directly, use
627 * drm_atomic_crtc_set_property() instead.
628 *
629 * This callback is optional if the driver does not support any
630 * driver-private atomic properties.
631 *
632 * NOTE:
633 *
634 * This function is called in the state assembly phase of atomic
635 * modesets, which can be aborted for any reason (including on
636 * userspace's request to just check whether a configuration would be
637 * possible). Drivers MUST NOT touch any persistent state (hardware or
638 * software) or data structures except the passed in @state parameter.
639 *
640 * Also since userspace controls in which order properties are set this
641 * function must not do any input validation (since the state update is
642 * incomplete and hence likely inconsistent). Instead any such input
643 * validation must be done in the various atomic_check callbacks.
644 *
645 * RETURNS:
646 *
647 * 0 if the property has been found, -EINVAL if the property isn't
648 * implemented by the driver (which should never happen, the core only
649 * asks for properties attached to this CRTC). No other validation is
650 * allowed by the driver. The core already checks that the property
651 * value is within the range (integer, valid enum value, ...) the driver
652 * set when registering the property.
653 */
386 int (*atomic_set_property)(struct drm_crtc *crtc, 654 int (*atomic_set_property)(struct drm_crtc *crtc,
387 struct drm_crtc_state *state, 655 struct drm_crtc_state *state,
388 struct drm_property *property, 656 struct drm_property *property,
389 uint64_t val); 657 uint64_t val);
658 /**
659 * @atomic_get_property:
660 *
661 * Reads out the decoded driver-private property. This is used to
662 * implement the GETCRTC IOCTL.
663 *
664 * Do not call this function directly, use
665 * drm_atomic_crtc_get_property() instead.
666 *
667 * This callback is optional if the driver does not support any
668 * driver-private atomic properties.
669 *
670 * RETURNS:
671 *
672 * 0 on success, -EINVAL if the property isn't implemented by the
673 * driver (which should never happen, the core only asks for
674 * properties attached to this CRTC).
675 */
390 int (*atomic_get_property)(struct drm_crtc *crtc, 676 int (*atomic_get_property)(struct drm_crtc *crtc,
391 const struct drm_crtc_state *state, 677 const struct drm_crtc_state *state,
392 struct drm_property *property, 678 struct drm_property *property,
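
The @page_flip documentation above amounts to a recipe: reject a second flip with -EBUSY, hold a vblank reference while the flip is pending, and complete the userspace event from the vblank handler. A condensed sketch of that shape using only the helpers named in the text (foo_* names are hypothetical; locking and the actual hardware programming are elided):

/* Sketch of a ->page_flip() implementation following the rules above;
 * foo_* names are hypothetical. */
static int foo_crtc_page_flip(struct drm_crtc *crtc,
                              struct drm_framebuffer *fb,
                              struct drm_pending_vblank_event *event,
                              uint32_t flags)
{
    struct foo_crtc *foo = to_foo_crtc(crtc);   /* hypothetical upcast */
    int ret;

    if (foo->pending_event)
        return -EBUSY;                 /* a flip is already in flight */

    ret = drm_crtc_vblank_get(crtc);   /* keep vblank irqs running */
    if (ret)
        return ret;

    foo->pending_event = event;        /* may be NULL if no event requested */
    /* ... wait for rendering to fb, program the new scanout address ... */
    return 0;
}

/* From the vblank interrupt, once the flip has actually latched: */
static void foo_finish_page_flip(struct foo_crtc *foo)
{
    if (foo->pending_event)
        drm_crtc_send_vblank_event(&foo->base, foo->pending_event);
    foo->pending_event = NULL;
    drm_crtc_vblank_put(&foo->base);
}
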
@@ -416,7 +702,7 @@ struct drm_crtc_funcs {
416 * @properties: property tracking for this CRTC 702 * @properties: property tracking for this CRTC
417 * @state: current atomic state for this CRTC 703 * @state: current atomic state for this CRTC
418 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for 704 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
419 * legacy ioctls 705 * legacy IOCTLs
420 * 706 *
421 * Each CRTC may have one or more connectors associated with it. This structure 707 * Each CRTC may have one or more connectors associated with it. This structure
422 * allows the CRTC to be controlled. 708 * allows the CRTC to be controlled.
@@ -426,6 +712,8 @@ struct drm_crtc {
426 struct device_node *port; 712 struct device_node *port;
427 struct list_head head; 713 struct list_head head;
428 714
715 char *name;
716
429 /* 717 /*
430 * crtc mutex 718 * crtc mutex
431 * 719 *
@@ -463,14 +751,14 @@ struct drm_crtc {
463 uint16_t *gamma_store; 751 uint16_t *gamma_store;
464 752
465 /* if you are using the helper */ 753 /* if you are using the helper */
466 const void *helper_private; 754 const struct drm_crtc_helper_funcs *helper_private;
467 755
468 struct drm_object_properties properties; 756 struct drm_object_properties properties;
469 757
470 struct drm_crtc_state *state; 758 struct drm_crtc_state *state;
471 759
472 /* 760 /*
473 * For legacy crtc ioctls so that atomic drivers can get at the locking 761 * For legacy crtc IOCTLs so that atomic drivers can get at the locking
474 * acquire context. 762 * acquire context.
475 */ 763 */
476 struct drm_modeset_acquire_ctx *acquire_ctx; 764 struct drm_modeset_acquire_ctx *acquire_ctx;
@@ -495,54 +783,239 @@ struct drm_connector_state {
495 783
496/** 784/**
497 * struct drm_connector_funcs - control connectors on a given device 785 * struct drm_connector_funcs - control connectors on a given device
498 * @dpms: set power state
499 * @save: save connector state
500 * @restore: restore connector state
501 * @reset: reset connector after state has been invalidated (e.g. resume)
502 * @detect: is this connector active?
503 * @fill_modes: fill mode list for this connector
504 * @set_property: property for this connector may need an update
505 * @destroy: make object go away
506 * @force: notify the driver that the connector is forced on
507 * @atomic_duplicate_state: duplicate the atomic state for this connector
508 * @atomic_destroy_state: destroy an atomic state for this connector
509 * @atomic_set_property: set a property on an atomic state for this connector
510 * (do not call directly, use drm_atomic_connector_set_property())
511 * @atomic_get_property: get a property on an atomic state for this connector
512 * (do not call directly, use drm_atomic_connector_get_property())
513 * 786 *
514 * Each CRTC may have one or more connectors attached to it. The functions 787 * Each CRTC may have one or more connectors attached to it. The functions
515 * below allow the core DRM code to control connectors, enumerate available modes, 788 * below allow the core DRM code to control connectors, enumerate available modes,
516 * etc. 789 * etc.
517 */ 790 */
518struct drm_connector_funcs { 791struct drm_connector_funcs {
792 /**
793 * @dpms:
794 *
795 * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
796 * is exposed as a standard property on the connector, but diverted to
797 * this callback in the drm core. Note that atomic drivers don't
798 * implement the 4 level DPMS support on the connector any more, but
799 * instead only have an on/off "ACTIVE" property on the CRTC object.
800 *
801 * Drivers implementing atomic modeset should use
802 * drm_atomic_helper_connector_dpms() to implement this hook.
803 *
804 * RETURNS:
805 *
806 * 0 on success or a negative error code on failure.
807 */
519 int (*dpms)(struct drm_connector *connector, int mode); 808 int (*dpms)(struct drm_connector *connector, int mode);
520 void (*save)(struct drm_connector *connector); 809
521 void (*restore)(struct drm_connector *connector); 810 /**
811 * @reset:
812 *
813 * Reset connector hardware and software state to off. This function isn't
814 * called by the core directly, only through drm_mode_config_reset().
815 * It's not a helper hook only for historical reasons.
816 *
817 * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
818 * atomic state using this hook.
819 */
522 void (*reset)(struct drm_connector *connector); 820 void (*reset)(struct drm_connector *connector);
523 821
524 /* Check to see if anything is attached to the connector. 822 /**
525 * @force is set to false whilst polling, true when checking the 823 * @detect:
526 * connector due to user request. @force can be used by the driver 824 *
527 * to avoid expensive, destructive operations during automated 825 * Check to see if anything is attached to the connector. The parameter
528 * probing. 826 * force is set to false whilst polling, true when checking the
827 * connector due to a user request. force can be used by the driver to
828 * avoid expensive, destructive operations during automated probing.
829 *
830 * FIXME:
831 *
832 * Note that this hook is only called by the probe helper. It's not in
833 * the helper library vtable purely for historical reasons. The only DRM
834 * core entry point to probe connector state is @fill_modes.
835 *
836 * RETURNS:
837 *
838 * drm_connector_status indicating the connector's status.
529 */ 839 */
530 enum drm_connector_status (*detect)(struct drm_connector *connector, 840 enum drm_connector_status (*detect)(struct drm_connector *connector,
531 bool force); 841 bool force);
842
843 /**
844 * @force:
845 *
846 * This function is called to update internal encoder state when the
847 * connector is forced to a certain state by userspace, either through
848 * the sysfs interfaces or on the kernel cmdline. In that case the
849 * @detect callback isn't called.
850 *
851 * FIXME:
852 *
853 * Note that this hook is only called by the probe helper. It's not in
854 * the helper library vtable purely for historical reasons. The only DRM
855 * core entry point to probe connector state is @fill_modes.
856 */
857 void (*force)(struct drm_connector *connector);
858
859 /**
860 * @fill_modes:
861 *
862 * Entry point for output detection and basic mode validation. The
863 * driver should reprobe the output if needed (e.g. when hotplug
864 * handling is unreliable), add all detected modes to connector->modes
 865 * and filter out any that the device can't support in any configuration. It
866 * also needs to filter out any modes wider or higher than the
867 * parameters max_width and max_height indicate.
868 *
869 * The drivers must also prune any modes no longer valid from
870 * connector->modes. Furthermore it must update connector->status and
 871 * connector->edid. If no EDID has been received for this output,
872 * connector->edid must be NULL.
873 *
874 * Drivers using the probe helpers should use
875 * drm_helper_probe_single_connector_modes() or
876 * drm_helper_probe_single_connector_modes_nomerge() to implement this
877 * function.
878 *
879 * RETURNS:
880 *
881 * The number of modes detected and filled into connector->modes.
882 */
532 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); 883 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
884
885 /**
886 * @set_property:
887 *
888 * This is the legacy entry point to update a property attached to the
889 * connector.
890 *
891 * Drivers implementing atomic modeset should use
892 * drm_atomic_helper_connector_set_property() to implement this hook.
893 *
894 * This callback is optional if the driver does not support any legacy
895 * driver-private properties.
896 *
897 * RETURNS:
898 *
899 * 0 on success or a negative error code on failure.
900 */
533 int (*set_property)(struct drm_connector *connector, struct drm_property *property, 901 int (*set_property)(struct drm_connector *connector, struct drm_property *property,
534 uint64_t val); 902 uint64_t val);
903
904 /**
905 * @destroy:
906 *
907 * Clean up connector resources. This is called at driver unload time
908 * through drm_mode_config_cleanup(). It can also be called at runtime
909 * when a connector is being hot-unplugged for drivers that support
910 * connector hotplugging (e.g. DisplayPort MST).
911 */
535 void (*destroy)(struct drm_connector *connector); 912 void (*destroy)(struct drm_connector *connector);
536 void (*force)(struct drm_connector *connector);
537 913
538 /* atomic update handling */ 914 /**
915 * @atomic_duplicate_state:
916 *
917 * Duplicate the current atomic state for this connector and return it.
 918 * The core and helpers guarantee that any atomic state duplicated with
919 * this hook and still owned by the caller (i.e. not transferred to the
920 * driver by calling ->atomic_commit() from struct
921 * &drm_mode_config_funcs) will be cleaned up by calling the
922 * @atomic_destroy_state hook in this structure.
923 *
924 * Atomic drivers which don't subclass struct &drm_connector_state should use
925 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
926 * state structure to extend it with driver-private state should use
927 * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
928 * duplicated in a consistent fashion across drivers.
929 *
930 * It is an error to call this hook before connector->state has been
931 * initialized correctly.
932 *
933 * NOTE:
934 *
935 * If the duplicate state references refcounted resources this hook must
936 * acquire a reference for each of them. The driver must release these
937 * references again in @atomic_destroy_state.
938 *
939 * RETURNS:
940 *
941 * Duplicated atomic state or NULL when the allocation failed.
942 */
539 struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); 943 struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
944
945 /**
946 * @atomic_destroy_state:
947 *
948 * Destroy a state duplicated with @atomic_duplicate_state and release
949 * or unreference all resources it references
950 */
540 void (*atomic_destroy_state)(struct drm_connector *connector, 951 void (*atomic_destroy_state)(struct drm_connector *connector,
541 struct drm_connector_state *state); 952 struct drm_connector_state *state);
953
954 /**
955 * @atomic_set_property:
956 *
957 * Decode a driver-private property value and store the decoded value
958 * into the passed-in state structure. Since the atomic core decodes all
959 * standardized properties (even for extensions beyond the core set of
960 * properties which might not be implemented by all drivers) this
961 * requires drivers to subclass the state structure.
962 *
963 * Such driver-private properties should really only be implemented for
964 * truly hardware/vendor specific state. Instead it is preferred to
 965 * standardize the atomic extension and decode the properties used to expose
966 * such an extension in the core.
967 *
968 * Do not call this function directly, use
969 * drm_atomic_connector_set_property() instead.
970 *
971 * This callback is optional if the driver does not support any
972 * driver-private atomic properties.
973 *
974 * NOTE:
975 *
976 * This function is called in the state assembly phase of atomic
977 * modesets, which can be aborted for any reason (including on
978 * userspace's request to just check whether a configuration would be
979 * possible). Drivers MUST NOT touch any persistent state (hardware or
980 * software) or data structures except the passed in @state parameter.
981 *
982 * Also since userspace controls in which order properties are set this
983 * function must not do any input validation (since the state update is
984 * incomplete and hence likely inconsistent). Instead any such input
985 * validation must be done in the various atomic_check callbacks.
986 *
987 * RETURNS:
988 *
989 * 0 if the property has been found, -EINVAL if the property isn't
990 * implemented by the driver (which shouldn't ever happen, the core only
991 * asks for properties attached to this connector). No other validation
992 * is allowed by the driver. The core already checks that the property
993 * value is within the range (integer, valid enum value, ...) the driver
994 * set when registering the property.
995 */
542 int (*atomic_set_property)(struct drm_connector *connector, 996 int (*atomic_set_property)(struct drm_connector *connector,
543 struct drm_connector_state *state, 997 struct drm_connector_state *state,
544 struct drm_property *property, 998 struct drm_property *property,
545 uint64_t val); 999 uint64_t val);
1000
1001 /**
1002 * @atomic_get_property:
1003 *
1004 * Reads out the decoded driver-private property. This is used to
1005 * implement the GETCONNECTOR IOCTL.
1006 *
1007 * Do not call this function directly, use
1008 * drm_atomic_connector_get_property() instead.
1009 *
1010 * This callback is optional if the driver does not support any
1011 * driver-private atomic properties.
1012 *
1013 * RETURNS:
1014 *
1015 * 0 on success, -EINVAL if the property isn't implemented by the
1016 * driver (which shouldn't ever happen, the core only asks for
1017 * properties attached to this connector).
1018 */
546 int (*atomic_get_property)(struct drm_connector *connector, 1019 int (*atomic_get_property)(struct drm_connector *connector,
547 const struct drm_connector_state *state, 1020 const struct drm_connector_state *state,
548 struct drm_property *property, 1021 struct drm_property *property,
@@ -551,13 +1024,26 @@ struct drm_connector_funcs {
551 1024
552/** 1025/**
553 * struct drm_encoder_funcs - encoder controls 1026 * struct drm_encoder_funcs - encoder controls
554 * @reset: reset state (e.g. at init or resume time)
555 * @destroy: cleanup and free associated data
556 * 1027 *
557 * Encoders sit between CRTCs and connectors. 1028 * Encoders sit between CRTCs and connectors.
558 */ 1029 */
559struct drm_encoder_funcs { 1030struct drm_encoder_funcs {
1031 /**
1032 * @reset:
1033 *
1034 * Reset encoder hardware and software state to off. This function isn't
1035 * called by the core directly, only through drm_mode_config_reset().
1036 * It's not a helper hook only for historical reasons.
1037 */
560 void (*reset)(struct drm_encoder *encoder); 1038 void (*reset)(struct drm_encoder *encoder);
1039
1040 /**
1041 * @destroy:
1042 *
1043 * Clean up encoder resources. This is only called at driver unload time
1044 * through drm_mode_config_cleanup() since an encoder cannot be
1045 * hotplugged in DRM.
1046 */
561 void (*destroy)(struct drm_encoder *encoder); 1047 void (*destroy)(struct drm_encoder *encoder);
562}; 1048};
563 1049
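
For the subclassing case that the @atomic_duplicate_state text keeps pointing at, the intended pattern is a wrapper struct with the core state embedded first, duplicated with kmemdup() plus the double-underscore helper. A sketch for a connector (foo_* names and the private field are hypothetical; the matching destroy helper is assumed to pair with the duplicate one):

/* Sketch: subclassed connector state as described above; foo_* names
 * are hypothetical. */
struct foo_connector_state {
    struct drm_connector_state base;   /* core state must be embedded first */
    int broadcast_rgb;                 /* driver-private property value */
};

static struct drm_connector_state *
foo_connector_duplicate_state(struct drm_connector *connector)
{
    struct foo_connector_state *state;

    state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
    if (!state)
        return NULL;

    /* fix up shared state (references etc.) consistently across drivers */
    __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
    return &state->base;
}

static void foo_connector_destroy_state(struct drm_connector *connector,
                                        struct drm_connector_state *state)
{
    __drm_atomic_helper_connector_destroy_state(connector, state);
    kfree(state);
}
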
@@ -593,7 +1079,7 @@ struct drm_encoder {
593 struct drm_crtc *crtc; 1079 struct drm_crtc *crtc;
594 struct drm_bridge *bridge; 1080 struct drm_bridge *bridge;
595 const struct drm_encoder_funcs *funcs; 1081 const struct drm_encoder_funcs *funcs;
596 const void *helper_private; 1082 const struct drm_encoder_helper_funcs *helper_private;
597}; 1083};
598 1084
599/* should we poll this connector for connects and disconnects */ 1085/* should we poll this connector for connects and disconnects */
@@ -698,7 +1184,7 @@ struct drm_connector {
698 /* requested DPMS state */ 1184 /* requested DPMS state */
699 int dpms; 1185 int dpms;
700 1186
701 const void *helper_private; 1187 const struct drm_connector_helper_funcs *helper_private;
702 1188
703 /* forced on connector */ 1189 /* forced on connector */
704 struct drm_cmdline_mode cmdline_mode; 1190 struct drm_cmdline_mode cmdline_mode;
@@ -778,40 +1264,203 @@ struct drm_plane_state {
778 1264
779/** 1265/**
780 * struct drm_plane_funcs - driver plane control functions 1266 * struct drm_plane_funcs - driver plane control functions
781 * @update_plane: update the plane configuration
782 * @disable_plane: shut down the plane
783 * @destroy: clean up plane resources
784 * @reset: reset plane after state has been invalidated (e.g. resume)
785 * @set_property: called when a property is changed
786 * @atomic_duplicate_state: duplicate the atomic state for this plane
787 * @atomic_destroy_state: destroy an atomic state for this plane
788 * @atomic_set_property: set a property on an atomic state for this plane
789 * (do not call directly, use drm_atomic_plane_set_property())
790 * @atomic_get_property: get a property on an atomic state for this plane
791 * (do not call directly, use drm_atomic_plane_get_property())
792 */ 1267 */
793struct drm_plane_funcs { 1268struct drm_plane_funcs {
1269 /**
1270 * @update_plane:
1271 *
1272 * This is the legacy entry point to enable and configure the plane for
1273 * the given CRTC and framebuffer. It is never called to disable the
 1274 * plane, i.e. the passed-in crtc and fb parameters are never NULL.
1275 *
1276 * The source rectangle in frame buffer memory coordinates is given by
1277 * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
1278 * values). Devices that don't support subpixel plane coordinates can
1279 * ignore the fractional part.
1280 *
1281 * The destination rectangle in CRTC coordinates is given by the
1282 * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
1283 * Devices scale the source rectangle to the destination rectangle. If
1284 * scaling is not supported, and the source rectangle size doesn't match
1285 * the destination rectangle size, the driver must return a
 1286 * -EINVAL error.
1287 *
1288 * Drivers implementing atomic modeset should use
1289 * drm_atomic_helper_update_plane() to implement this hook.
1290 *
1291 * RETURNS:
1292 *
1293 * 0 on success or a negative error code on failure.
1294 */
794 int (*update_plane)(struct drm_plane *plane, 1295 int (*update_plane)(struct drm_plane *plane,
795 struct drm_crtc *crtc, struct drm_framebuffer *fb, 1296 struct drm_crtc *crtc, struct drm_framebuffer *fb,
796 int crtc_x, int crtc_y, 1297 int crtc_x, int crtc_y,
797 unsigned int crtc_w, unsigned int crtc_h, 1298 unsigned int crtc_w, unsigned int crtc_h,
798 uint32_t src_x, uint32_t src_y, 1299 uint32_t src_x, uint32_t src_y,
799 uint32_t src_w, uint32_t src_h); 1300 uint32_t src_w, uint32_t src_h);
1301
1302 /**
1303 * @disable_plane:
1304 *
1305 * This is the legacy entry point to disable the plane. The DRM core
1306 * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
1307 * with the frame buffer ID set to 0. Disabled planes must not be
1308 * processed by the CRTC.
1309 *
1310 * Drivers implementing atomic modeset should use
1311 * drm_atomic_helper_disable_plane() to implement this hook.
1312 *
1313 * RETURNS:
1314 *
1315 * 0 on success or a negative error code on failure.
1316 */
800 int (*disable_plane)(struct drm_plane *plane); 1317 int (*disable_plane)(struct drm_plane *plane);
1318
1319 /**
1320 * @destroy:
1321 *
1322 * Clean up plane resources. This is only called at driver unload time
1323 * through drm_mode_config_cleanup() since a plane cannot be hotplugged
1324 * in DRM.
1325 */
801 void (*destroy)(struct drm_plane *plane); 1326 void (*destroy)(struct drm_plane *plane);
1327
1328 /**
1329 * @reset:
1330 *
1331 * Reset plane hardware and software state to off. This function isn't
1332 * called by the core directly, only through drm_mode_config_reset().
1333 * It's not a helper hook only for historical reasons.
1334 *
1335 * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
1336 * atomic state using this hook.
1337 */
802 void (*reset)(struct drm_plane *plane); 1338 void (*reset)(struct drm_plane *plane);
803 1339
1340 /**
1341 * @set_property:
1342 *
1343 * This is the legacy entry point to update a property attached to the
1344 * plane.
1345 *
1346 * Drivers implementing atomic modeset should use
1347 * drm_atomic_helper_plane_set_property() to implement this hook.
1348 *
1349 * This callback is optional if the driver does not support any legacy
1350 * driver-private properties.
1351 *
1352 * RETURNS:
1353 *
1354 * 0 on success or a negative error code on failure.
1355 */
804 int (*set_property)(struct drm_plane *plane, 1356 int (*set_property)(struct drm_plane *plane,
805 struct drm_property *property, uint64_t val); 1357 struct drm_property *property, uint64_t val);
806 1358
807 /* atomic update handling */ 1359 /**
1360 * @atomic_duplicate_state:
1361 *
1362 * Duplicate the current atomic state for this plane and return it.
 1363 * The core and helpers guarantee that any atomic state duplicated with
1364 * this hook and still owned by the caller (i.e. not transferred to the
1365 * driver by calling ->atomic_commit() from struct
1366 * &drm_mode_config_funcs) will be cleaned up by calling the
1367 * @atomic_destroy_state hook in this structure.
1368 *
1369 * Atomic drivers which don't subclass struct &drm_plane_state should use
1370 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
1371 * state structure to extend it with driver-private state should use
1372 * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
1373 * duplicated in a consistent fashion across drivers.
1374 *
1375 * It is an error to call this hook before plane->state has been
1376 * initialized correctly.
1377 *
1378 * NOTE:
1379 *
1380 * If the duplicate state references refcounted resources this hook must
1381 * acquire a reference for each of them. The driver must release these
1382 * references again in @atomic_destroy_state.
1383 *
1384 * RETURNS:
1385 *
1386 * Duplicated atomic state or NULL when the allocation failed.
1387 */
808 struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); 1388 struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
1389
1390 /**
1391 * @atomic_destroy_state:
1392 *
1393 * Destroy a state duplicated with @atomic_duplicate_state and release
 1394 * or unreference all resources it references.
1395 */
809 void (*atomic_destroy_state)(struct drm_plane *plane, 1396 void (*atomic_destroy_state)(struct drm_plane *plane,
810 struct drm_plane_state *state); 1397 struct drm_plane_state *state);
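
A hedged sketch of the subclassing pattern described above, assuming a hypothetical foo_plane_state with one driver-private member; the __drm_atomic_helper_plane_*() variants handle the shared base state and its references (their signatures as in this kernel):

	struct foo_plane_state {
		struct drm_plane_state base;
		int sharpness;		/* hypothetical driver-private state */
	};

	#define to_foo_plane_state(s) container_of(s, struct foo_plane_state, base)

	static struct drm_plane_state *foo_duplicate_state(struct drm_plane *plane)
	{
		struct foo_plane_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return NULL;
		/* copies the shared base state and acquires the fb reference */
		__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
		state->sharpness = to_foo_plane_state(plane->state)->sharpness;
		return &state->base;
	}

	static void foo_destroy_state(struct drm_plane *plane,
				      struct drm_plane_state *state)
	{
		/* releases the references acquired in @atomic_duplicate_state */
		__drm_atomic_helper_plane_destroy_state(plane, state);
		kfree(to_foo_plane_state(state));
	}
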
1398
1399 /**
1400 * @atomic_set_property:
1401 *
1402 * Decode a driver-private property value and store the decoded value
1403 * into the passed-in state structure. Since the atomic core decodes all
1404 * standardized properties (even for extensions beyond the core set of
1405 * properties which might not be implemented by all drivers) this
1406 * requires drivers to subclass the state structure.
1407 *
1408 * Such driver-private properties should really only be implemented for
1409 * truly hardware/vendor specific state. Instead it is preferred to
 1410 * standardize atomic extensions and decode the properties used to expose
1411 * such an extension in the core.
1412 *
1413 * Do not call this function directly, use
1414 * drm_atomic_plane_set_property() instead.
1415 *
1416 * This callback is optional if the driver does not support any
1417 * driver-private atomic properties.
1418 *
1419 * NOTE:
1420 *
1421 * This function is called in the state assembly phase of atomic
1422 * modesets, which can be aborted for any reason (including on
1423 * userspace's request to just check whether a configuration would be
1424 * possible). Drivers MUST NOT touch any persistent state (hardware or
1425 * software) or data structures except the passed in @state parameter.
1426 *
1427 * Also since userspace controls in which order properties are set this
1428 * function must not do any input validation (since the state update is
1429 * incomplete and hence likely inconsistent). Instead any such input
1430 * validation must be done in the various atomic_check callbacks.
1431 *
1432 * RETURNS:
1433 *
1434 * 0 if the property has been found, -EINVAL if the property isn't
1435 * implemented by the driver (which shouldn't ever happen, the core only
1436 * asks for properties attached to this plane). No other validation is
1437 * allowed by the driver. The core already checks that the property
1438 * value is within the range (integer, valid enum value, ...) the driver
1439 * set when registering the property.
1440 */
811 int (*atomic_set_property)(struct drm_plane *plane, 1441 int (*atomic_set_property)(struct drm_plane *plane,
812 struct drm_plane_state *state, 1442 struct drm_plane_state *state,
813 struct drm_property *property, 1443 struct drm_property *property,
814 uint64_t val); 1444 uint64_t val);
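
A hedged sketch of @atomic_set_property for a hypothetical driver-private sharpness property; note that only the passed-in state is touched, per the NOTE above:

	static int foo_atomic_set_property(struct drm_plane *plane,
					   struct drm_plane_state *state,
					   struct drm_property *property,
					   uint64_t val)
	{
		struct foo_plane *foo = to_foo_plane(plane);	/* hypothetical */

		if (property == foo->sharpness_property) {
			to_foo_plane_state(state)->sharpness = val;
			return 0;
		}

		/* the core only passes properties attached to this plane */
		return -EINVAL;
	}
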
1445
1446 /**
1447 * @atomic_get_property:
1448 *
1449 * Reads out the decoded driver-private property. This is used to
1450 * implement the GETPLANE IOCTL.
1451 *
1452 * Do not call this function directly, use
1453 * drm_atomic_plane_get_property() instead.
1454 *
1455 * This callback is optional if the driver does not support any
1456 * driver-private atomic properties.
1457 *
1458 * RETURNS:
1459 *
1460 * 0 on success, -EINVAL if the property isn't implemented by the
1461 * driver (which should never happen, the core only asks for
1462 * properties attached to this plane).
1463 */
815 int (*atomic_get_property)(struct drm_plane *plane, 1464 int (*atomic_get_property)(struct drm_plane *plane,
816 const struct drm_plane_state *state, 1465 const struct drm_plane_state *state,
817 struct drm_property *property, 1466 struct drm_property *property,
@@ -824,6 +1473,7 @@ enum drm_plane_type {
824 DRM_PLANE_TYPE_CURSOR, 1473 DRM_PLANE_TYPE_CURSOR,
825}; 1474};
826 1475
1476
827/** 1477/**
828 * struct drm_plane - central DRM plane control structure 1478 * struct drm_plane - central DRM plane control structure
829 * @dev: DRM device this plane belongs to 1479 * @dev: DRM device this plane belongs to
@@ -846,6 +1496,8 @@ struct drm_plane {
846 struct drm_device *dev; 1496 struct drm_device *dev;
847 struct list_head head; 1497 struct list_head head;
848 1498
1499 char *name;
1500
849 struct drm_modeset_lock mutex; 1501 struct drm_modeset_lock mutex;
850 1502
851 struct drm_mode_object base; 1503 struct drm_mode_object base;
@@ -866,7 +1518,7 @@ struct drm_plane {
866 1518
867 enum drm_plane_type type; 1519 enum drm_plane_type type;
868 1520
869 const void *helper_private; 1521 const struct drm_plane_helper_funcs *helper_private;
870 1522
871 struct drm_plane_state *state; 1523 struct drm_plane_state *state;
872}; 1524};
@@ -874,24 +1526,114 @@ struct drm_plane {
874/** 1526/**
875 * struct drm_bridge_funcs - drm_bridge control functions 1527 * struct drm_bridge_funcs - drm_bridge control functions
876 * @attach: Called during drm_bridge_attach 1528 * @attach: Called during drm_bridge_attach
877 * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
878 * @disable: Called right before encoder prepare, disables the bridge
879 * @post_disable: Called right after encoder prepare, for lockstepped disable
880 * @mode_set: Set this mode to the bridge
881 * @pre_enable: Called right before encoder commit, for lockstepped commit
882 * @enable: Called right after encoder commit, enables the bridge
883 */ 1529 */
884struct drm_bridge_funcs { 1530struct drm_bridge_funcs {
885 int (*attach)(struct drm_bridge *bridge); 1531 int (*attach)(struct drm_bridge *bridge);
1532
1533 /**
1534 * @mode_fixup:
1535 *
 1536 * This callback is used to validate and adjust a mode. The parameter
1537 * mode is the display mode that should be fed to the next element in
1538 * the display chain, either the final &drm_connector or the next
1539 * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
1540 * requires. It can be modified by this callback and does not need to
1541 * match mode.
1542 *
1543 * This is the only hook that allows a bridge to reject a modeset. If
 1544 * this function passes, all other callbacks must succeed for this
1545 * configuration.
1546 *
1547 * NOTE:
1548 *
1549 * This function is called in the check phase of atomic modesets, which
1550 * can be aborted for any reason (including on userspace's request to
1551 * just check whether a configuration would be possible). Drivers MUST
1552 * NOT touch any persistent state (hardware or software) or data
 1553 * structures except the passed-in mode and adjusted_mode parameters.
1554 *
1555 * RETURNS:
1556 *
1557 * True if an acceptable configuration is possible, false if the modeset
1558 * operation should be rejected.
1559 */
886 bool (*mode_fixup)(struct drm_bridge *bridge, 1560 bool (*mode_fixup)(struct drm_bridge *bridge,
887 const struct drm_display_mode *mode, 1561 const struct drm_display_mode *mode,
888 struct drm_display_mode *adjusted_mode); 1562 struct drm_display_mode *adjusted_mode);
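
A hedged @mode_fixup sketch for a bridge with a made-up 150 MHz pixel clock limit, leaving the adjusted mode untouched otherwise:

	static bool foo_bridge_mode_fixup(struct drm_bridge *bridge,
					  const struct drm_display_mode *mode,
					  struct drm_display_mode *adjusted_mode)
	{
		/* mode->clock is in kHz; 150000 is a made-up hardware limit */
		if (mode->clock > 150000)
			return false;

		return true;
	}
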
1563 /**
1564 * @disable:
1565 *
1566 * This callback should disable the bridge. It is called right before
1567 * the preceding element in the display pipe is disabled. If the
1568 * preceding element is a bridge this means it's called before that
1569 * bridge's ->disable() function. If the preceding element is a
1570 * &drm_encoder it's called right before the encoder's ->disable(),
1571 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1572 *
1573 * The bridge can assume that the display pipe (i.e. clocks and timing
1574 * signals) feeding it is still running when this callback is called.
1575 */
889 void (*disable)(struct drm_bridge *bridge); 1576 void (*disable)(struct drm_bridge *bridge);
1577
1578 /**
1579 * @post_disable:
1580 *
1581 * This callback should disable the bridge. It is called right after
1582 * the preceding element in the display pipe is disabled. If the
1583 * preceding element is a bridge this means it's called after that
1584 * bridge's ->post_disable() function. If the preceding element is a
1585 * &drm_encoder it's called right after the encoder's ->disable(),
1586 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1587 *
1588 * The bridge must assume that the display pipe (i.e. clocks and timing
 1589 * signals) feeding it is no longer running when this callback is
1590 * called.
1591 */
890 void (*post_disable)(struct drm_bridge *bridge); 1592 void (*post_disable)(struct drm_bridge *bridge);
1593
1594 /**
1595 * @mode_set:
1596 *
1597 * This callback should set the given mode on the bridge. It is called
1598 * after the ->mode_set() callback for the preceding element in the
1599 * display pipeline has been called already. The display pipe (i.e.
1600 * clocks and timing signals) is off when this function is called.
1601 */
891 void (*mode_set)(struct drm_bridge *bridge, 1602 void (*mode_set)(struct drm_bridge *bridge,
892 struct drm_display_mode *mode, 1603 struct drm_display_mode *mode,
893 struct drm_display_mode *adjusted_mode); 1604 struct drm_display_mode *adjusted_mode);
1605 /**
1606 * @pre_enable:
1607 *
1608 * This callback should enable the bridge. It is called right before
1609 * the preceding element in the display pipe is enabled. If the
1610 * preceding element is a bridge this means it's called before that
1611 * bridge's ->pre_enable() function. If the preceding element is a
1612 * &drm_encoder it's called right before the encoder's ->enable(),
1613 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1614 *
1615 * The display pipe (i.e. clocks and timing signals) feeding this bridge
1616 * will not yet be running when this callback is called. The bridge must
1617 * not enable the display link feeding the next bridge in the chain (if
1618 * there is one) when this callback is called.
1619 */
894 void (*pre_enable)(struct drm_bridge *bridge); 1620 void (*pre_enable)(struct drm_bridge *bridge);
1621
1622 /**
1623 * @enable:
1624 *
1625 * This callback should enable the bridge. It is called right after
1626 * the preceding element in the display pipe is enabled. If the
1627 * preceding element is a bridge this means it's called after that
1628 * bridge's ->enable() function. If the preceding element is a
1629 * &drm_encoder it's called right after the encoder's ->enable(),
1630 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
1631 *
1632 * The bridge can assume that the display pipe (i.e. clocks and timing
1633 * signals) feeding it is running when this callback is called. This
1634 * callback must enable the display link feeding the next bridge in the
1635 * chain if there is one.
1636 */
895 void (*enable)(struct drm_bridge *bridge); 1637 void (*enable)(struct drm_bridge *bridge);
896}; 1638};
897 1639
@@ -922,7 +1664,7 @@ struct drm_bridge {
922 * struct drm_atomic_state - the global state object for atomic updates 1664 * struct drm_atomic_state - the global state object for atomic updates
923 * @dev: parent DRM device 1665 * @dev: parent DRM device
924 * @allow_modeset: allow full modeset 1666 * @allow_modeset: allow full modeset
925 * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics 1667 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
926 * @planes: pointer to array of plane pointers 1668 * @planes: pointer to array of plane pointers
927 * @plane_states: pointer to array of plane states pointers 1669 * @plane_states: pointer to array of plane states pointers
928 * @crtcs: pointer to array of CRTC pointers 1670 * @crtcs: pointer to array of CRTC pointers
@@ -977,31 +1719,254 @@ struct drm_mode_set {
977 1719
978/** 1720/**
979 * struct drm_mode_config_funcs - basic driver provided mode setting functions 1721 * struct drm_mode_config_funcs - basic driver provided mode setting functions
980 * @fb_create: create a new framebuffer object
981 * @output_poll_changed: function to handle output configuration changes
982 * @atomic_check: check whether a given atomic state update is possible
983 * @atomic_commit: commit an atomic state update previously verified with
984 * atomic_check()
985 * @atomic_state_alloc: allocate a new atomic state
986 * @atomic_state_clear: clear the atomic state
987 * @atomic_state_free: free the atomic state
988 * 1722 *
989 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that 1723 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
990 * involve drivers. 1724 * involve drivers.
991 */ 1725 */
992struct drm_mode_config_funcs { 1726struct drm_mode_config_funcs {
1727 /**
1728 * @fb_create:
1729 *
1730 * Create a new framebuffer object. The core does basic checks on the
1731 * requested metadata, but most of that is left to the driver. See
1732 * struct &drm_mode_fb_cmd2 for details.
1733 *
1734 * RETURNS:
1735 *
1736 * A new framebuffer with an initial reference count of 1 or a negative
1737 * error code encoded with ERR_PTR().
1738 */
993 struct drm_framebuffer *(*fb_create)(struct drm_device *dev, 1739 struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
994 struct drm_file *file_priv, 1740 struct drm_file *file_priv,
995 struct drm_mode_fb_cmd2 *mode_cmd); 1741 const struct drm_mode_fb_cmd2 *mode_cmd);
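
A hedged @fb_create sketch; the format check and the ERR_PTR() convention follow the contract above, foo_framebuffer_create() is hypothetical:

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		/* made-up hardware that only scans out XRGB8888 */
		if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
			return ERR_PTR(-EINVAL);

		return foo_framebuffer_create(dev, file_priv, mode_cmd);
	}
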
1742
1743 /**
1744 * @output_poll_changed:
1745 *
1746 * Callback used by helpers to inform the driver of output configuration
1747 * changes.
1748 *
1749 * Drivers implementing fbdev emulation with the helpers can call
 1750 * drm_fb_helper_hotplug_event() from this hook to inform the fbdev
1751 * helper of output changes.
1752 *
1753 * FIXME:
1754 *
1755 * Except that there's no vtable for device-level helper callbacks
1756 * there's no reason this is a core function.
1757 */
996 void (*output_poll_changed)(struct drm_device *dev); 1758 void (*output_poll_changed)(struct drm_device *dev);
997 1759
1760 /**
1761 * @atomic_check:
1762 *
1763 * This is the only hook to validate an atomic modeset update. This
1764 * function must reject any modeset and state changes which the hardware
1765 * or driver doesn't support. This includes but is of course not limited
1766 * to:
1767 *
1768 * - Checking that the modes, framebuffers, scaling and placement
1769 * requirements and so on are within the limits of the hardware.
1770 *
1771 * - Checking that any hidden shared resources are not oversubscribed.
1772 * This can be shared PLLs, shared lanes, overall memory bandwidth,
1773 * display fifo space (where shared between planes or maybe even
1774 * CRTCs).
1775 *
1776 * - Checking that virtualized resources exported to userspace are not
1777 * oversubscribed. For various reasons it can make sense to expose
 1778 * more planes, crtcs or encoders than are physically there. One
 1779 * example is dual-pipe operations (which generally should be hidden
 1780 * from userspace when lockstepped in hardware, exposed otherwise),
1781 * where a plane might need 1 hardware plane (if it's just on one
1782 * pipe), 2 hardware planes (when it spans both pipes) or maybe even
 1783 * share a hardware plane with a 2nd plane (if there's a compatible
1784 * plane requested on the area handled by the other pipe).
1785 *
1786 * - Check that any transitional state is possible and that if
1787 * requested, the update can indeed be done in the vblank period
1788 * without temporarily disabling some functions.
1789 *
1790 * - Check any other constraints the driver or hardware might have.
1791 *
1792 * - This callback also needs to correctly fill out the &drm_crtc_state
1793 * in this update to make sure that drm_atomic_crtc_needs_modeset()
1794 * reflects the nature of the possible update and returns true if and
1795 * only if the update cannot be applied without tearing within one
1796 * vblank on that CRTC. The core uses that information to reject
1797 * updates which require a full modeset (i.e. blanking the screen, or
1798 * at least pausing updates for a substantial amount of time) if
1799 * userspace has disallowed that in its request.
1800 *
1801 * - The driver also does not need to repeat basic input validation
 1802 * as is done for the corresponding legacy entry points. The core does
1803 * that before calling this hook.
1804 *
1805 * See the documentation of @atomic_commit for an exhaustive list of
1806 * error conditions which don't have to be checked at the
 1807 * ->atomic_check() stage.
1808 *
1809 * See the documentation for struct &drm_atomic_state for how exactly
1810 * an atomic modeset update is described.
1811 *
1812 * Drivers using the atomic helpers can implement this hook using
1813 * drm_atomic_helper_check(), or one of the exported sub-functions of
1814 * it.
1815 *
1816 * RETURNS:
1817 *
1818 * 0 on success or one of the below negative error codes:
1819 *
1820 * - -EINVAL, if any of the above constraints are violated.
1821 *
1822 * - -EDEADLK, when returned from an attempt to acquire an additional
1823 * &drm_modeset_lock through drm_modeset_lock().
1824 *
1825 * - -ENOMEM, if allocating additional state sub-structures failed due
1826 * to lack of memory.
1827 *
1828 * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
1829 * This can either be due to a pending signal, or because the driver
1830 * needs to completely bail out to recover from an exceptional
 1831 * situation like a GPU hang. From a userspace point of view all errors are
1832 * treated equally.
1833 */
998 int (*atomic_check)(struct drm_device *dev, 1834 int (*atomic_check)(struct drm_device *dev,
999 struct drm_atomic_state *a); 1835 struct drm_atomic_state *state);
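
Drivers commonly run drm_atomic_helper_check() first and layer driver-specific constraint checks (such as the shared-resource checks listed above) on top; a sketch with a hypothetical foo_check_bandwidth():

	static int foo_atomic_check(struct drm_device *dev,
				    struct drm_atomic_state *state)
	{
		int ret;

		/* per-object checks through the ->atomic_check() helper hooks */
		ret = drm_atomic_helper_check(dev, state);
		if (ret)
			return ret;

		/* hypothetical global check, e.g. shared memory bandwidth */
		return foo_check_bandwidth(state);
	}
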
1836
1837 /**
1838 * @atomic_commit:
1839 *
1840 * This is the only hook to commit an atomic modeset update. The core
1841 * guarantees that @atomic_check has been called successfully before
1842 * calling this function, and that nothing has been changed in the
1843 * interim.
1844 *
1845 * See the documentation for struct &drm_atomic_state for how exactly
1846 * an atomic modeset update is described.
1847 *
1848 * Drivers using the atomic helpers can implement this hook using
1849 * drm_atomic_helper_commit(), or one of the exported sub-functions of
1850 * it.
1851 *
1852 * Asynchronous commits (as indicated with the async parameter) must
1853 * do any preparatory work which might result in an unsuccessful commit
1854 * in the context of this callback. The only exceptions are hardware
1855 * errors resulting in -EIO. But even in that case the driver must
1856 * ensure that the display pipe is at least running, to avoid
1857 * compositors crashing when pageflips don't work. Anything else,
1858 * specifically committing the update to the hardware, should be done
1859 * without blocking the caller. For updates which do not require a
1860 * modeset this must be guaranteed.
1861 *
1862 * The driver must wait for any pending rendering to the new
1863 * framebuffers to complete before executing the flip. It should also
1864 * wait for any pending rendering from other drivers if the underlying
1865 * buffer is a shared dma-buf. Asynchronous commits must not wait for
1866 * rendering in the context of this callback.
1867 *
1868 * An application can request to be notified when the atomic commit has
1869 * completed. These events are per-CRTC and can be distinguished by the
1870 * CRTC index supplied in &drm_event to userspace.
1871 *
1872 * The drm core will supply a struct &drm_event in the event
1873 * member of each CRTC's &drm_crtc_state structure. This can be handled by the
1874 * drm_crtc_send_vblank_event() function, which the driver should call on
1875 * the provided event upon completion of the atomic commit. Note that if
1876 * the driver supports vblank signalling and timestamping the vblank
1877 * counters and timestamps must agree with the ones returned from page
1878 * flip events. With the current vblank helper infrastructure this can
1879 * be achieved by holding a vblank reference while the page flip is
1880 * pending, acquired through drm_crtc_vblank_get() and released with
1881 * drm_crtc_vblank_put(). Drivers are free to implement their own vblank
1882 * counter and timestamp tracking though, e.g. if they have accurate
1883 * timestamp registers in hardware.
1884 *
1885 * NOTE:
1886 *
1887 * Drivers are not allowed to shut down any display pipe successfully
1888 * enabled through an atomic commit on their own. Doing so can result in
1889 * compositors crashing if a page flip is suddenly rejected because the
1890 * pipe is off.
1891 *
1892 * RETURNS:
1893 *
1894 * 0 on success or one of the below negative error codes:
1895 *
 1896 * - -EBUSY, if an asynchronous update is requested and there is
 1897 * an earlier update pending. Drivers are allowed to support a queue
 1898 * of outstanding updates, but currently no driver supports that.
 1899 * Note that drivers must wait for preceding updates to complete if a
 1900 * synchronous update is requested; they are not allowed to fail the
 1901 * commit in that case.
1902 *
1903 * - -ENOMEM, if the driver failed to allocate memory. Specifically
1904 * this can happen when trying to pin framebuffers, which must only
1905 * be done when committing the state.
1906 *
1907 * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
1908 * that the driver has run out of vram, iommu space or similar GPU
 1909 * address space needed for the framebuffer.
1910 *
1911 * - -EIO, if the hardware completely died.
1912 *
1913 * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
1914 * This can either be due to a pending signal, or because the driver
1915 * needs to completely bail out to recover from an exceptional
1916 * situation like a GPU hang. From a userspace point of view all errors are
1917 * treated equally.
1918 *
1919 * This list is exhaustive. Specifically this hook is not allowed to
1920 * return -EINVAL (any invalid requests should be caught in
1921 * @atomic_check) or -EDEADLK (this function must not acquire
1922 * additional modeset locks).
1923 */
1000 int (*atomic_commit)(struct drm_device *dev, 1924 int (*atomic_commit)(struct drm_device *dev,
1001 struct drm_atomic_state *a, 1925 struct drm_atomic_state *state,
1002 bool async); 1926 bool async);
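
A driver without private global checks can point the vtable straight at the helpers; a minimal sketch (foo_fb_create as sketched above):

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		.fb_create	= foo_fb_create,
		.atomic_check	= drm_atomic_helper_check,
		.atomic_commit	= drm_atomic_helper_commit,
	};
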
1927
1928 /**
1929 * @atomic_state_alloc:
1930 *
1931 * This optional hook can be used by drivers that want to subclass struct
1932 * &drm_atomic_state to be able to track their own driver-private global
1933 * state easily. If this hook is implemented, drivers must also
1934 * implement @atomic_state_clear and @atomic_state_free.
1935 *
1936 * RETURNS:
1937 *
1938 * A new &drm_atomic_state on success or NULL on failure.
1939 */
1003 struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); 1940 struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
1941
1942 /**
1943 * @atomic_state_clear:
1944 *
1945 * This hook must clear any driver private state duplicated into the
1946 * passed-in &drm_atomic_state. This hook is called when the caller
1947 * encountered a &drm_modeset_lock deadlock and needs to drop all
1948 * already acquired locks as part of the deadlock avoidance dance
1949 * implemented in drm_modeset_lock_backoff().
1950 *
1951 * Any duplicated state must be invalidated since a concurrent atomic
1952 * update might change it, and the drm atomic interfaces always apply
1953 * updates as relative changes to the current state.
1954 *
1955 * Drivers that implement this must call drm_atomic_state_default_clear()
1956 * to clear common state.
1957 */
1004 void (*atomic_state_clear)(struct drm_atomic_state *state); 1958 void (*atomic_state_clear)(struct drm_atomic_state *state);
1959
1960 /**
1961 * @atomic_state_free:
1962 *
 1963 * This hook needs to free driver-private resources and the &drm_atomic_state
 1964 * itself. Note that the core first calls drm_atomic_state_clear() to
 1965 * avoid code duplication between the clear and free hooks.
1966 *
1967 * Drivers that implement this must call drm_atomic_state_default_free()
1968 * to release common resources.
1969 */
1005 void (*atomic_state_free)(struct drm_atomic_state *state); 1970 void (*atomic_state_free)(struct drm_atomic_state *state);
1006}; 1971};
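
A hedged sketch of the subclassing pattern these three hooks enable, with a hypothetical driver-global member (drm_atomic_state_init() initializes the common base state):

	struct foo_atomic_state {
		struct drm_atomic_state base;
		int bandwidth;		/* hypothetical driver-global state */
	};

	#define to_foo_atomic_state(s) \
		container_of(s, struct foo_atomic_state, base)

	static struct drm_atomic_state *foo_state_alloc(struct drm_device *dev)
	{
		struct foo_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, &state->base) < 0) {
			kfree(state);
			return NULL;
		}
		return &state->base;
	}

	static void foo_state_clear(struct drm_atomic_state *state)
	{
		/* invalidate duplicated driver-private state, then the base */
		to_foo_atomic_state(state)->bandwidth = 0;
		drm_atomic_state_default_clear(state);
	}

	static void foo_state_free(struct drm_atomic_state *state)
	{
		drm_atomic_state_default_free(state);
		kfree(to_foo_atomic_state(state));
	}
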
1007 1972
@@ -1010,7 +1975,7 @@ struct drm_mode_config_funcs {
1010 * @mutex: mutex protecting KMS related lists and structures 1975 * @mutex: mutex protecting KMS related lists and structures
1011 * @connection_mutex: ww mutex protecting connector state and routing 1976 * @connection_mutex: ww mutex protecting connector state and routing
1012 * @acquire_ctx: global implicit acquire context used by atomic drivers for 1977 * @acquire_ctx: global implicit acquire context used by atomic drivers for
1013 * legacy ioctls 1978 * legacy IOCTLs
1014 * @idr_mutex: mutex for KMS ID allocation and management 1979 * @idr_mutex: mutex for KMS ID allocation and management
1015 * @crtc_idr: main KMS ID tracking object 1980 * @crtc_idr: main KMS ID tracking object
1016 * @fb_lock: mutex to protect fb state and lists 1981 * @fb_lock: mutex to protect fb state and lists
@@ -1166,7 +2131,7 @@ struct drm_mode_config {
1166 */ 2131 */
1167#define drm_for_each_plane_mask(plane, dev, plane_mask) \ 2132#define drm_for_each_plane_mask(plane, dev, plane_mask) \
1168 list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ 2133 list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
1169 if ((plane_mask) & (1 << drm_plane_index(plane))) 2134 for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
1170 2135
1171 2136
1172#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) 2137#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -1183,11 +2148,13 @@ struct drm_prop_enum_list {
1183 char *name; 2148 char *name;
1184}; 2149};
1185 2150
1186extern int drm_crtc_init_with_planes(struct drm_device *dev, 2151extern __printf(6, 7)
1187 struct drm_crtc *crtc, 2152int drm_crtc_init_with_planes(struct drm_device *dev,
1188 struct drm_plane *primary, 2153 struct drm_crtc *crtc,
1189 struct drm_plane *cursor, 2154 struct drm_plane *primary,
1190 const struct drm_crtc_funcs *funcs); 2155 struct drm_plane *cursor,
2156 const struct drm_crtc_funcs *funcs,
2157 const char *name, ...);
1191extern void drm_crtc_cleanup(struct drm_crtc *crtc); 2158extern void drm_crtc_cleanup(struct drm_crtc *crtc);
1192extern unsigned int drm_crtc_index(struct drm_crtc *crtc); 2159extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
1193 2160
@@ -1233,10 +2200,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
1233void drm_bridge_pre_enable(struct drm_bridge *bridge); 2200void drm_bridge_pre_enable(struct drm_bridge *bridge);
1234void drm_bridge_enable(struct drm_bridge *bridge); 2201void drm_bridge_enable(struct drm_bridge *bridge);
1235 2202
1236extern int drm_encoder_init(struct drm_device *dev, 2203extern __printf(5, 6)
1237 struct drm_encoder *encoder, 2204int drm_encoder_init(struct drm_device *dev,
1238 const struct drm_encoder_funcs *funcs, 2205 struct drm_encoder *encoder,
1239 int encoder_type); 2206 const struct drm_encoder_funcs *funcs,
2207 int encoder_type, const char *name, ...);
1240 2208
1241/** 2209/**
1242 * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 2210 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -1251,13 +2219,15 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
1251 return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); 2219 return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
1252} 2220}
1253 2221
1254extern int drm_universal_plane_init(struct drm_device *dev, 2222extern __printf(8, 9)
1255 struct drm_plane *plane, 2223int drm_universal_plane_init(struct drm_device *dev,
1256 unsigned long possible_crtcs, 2224 struct drm_plane *plane,
1257 const struct drm_plane_funcs *funcs, 2225 unsigned long possible_crtcs,
1258 const uint32_t *formats, 2226 const struct drm_plane_funcs *funcs,
1259 unsigned int format_count, 2227 const uint32_t *formats,
1260 enum drm_plane_type type); 2228 unsigned int format_count,
2229 enum drm_plane_type type,
2230 const char *name, ...);
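
With the new printf-style name parameter, plane registration might look like this (formats, funcs and crtc_index are hypothetical):

	static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

	ret = drm_universal_plane_init(dev, plane, 1 << crtc_index,
				       &foo_plane_funcs, foo_formats,
				       ARRAY_SIZE(foo_formats),
				       DRM_PLANE_TYPE_PRIMARY,
				       "plane-%d", crtc_index);
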
1261extern int drm_plane_init(struct drm_device *dev, 2231extern int drm_plane_init(struct drm_device *dev,
1262 struct drm_plane *plane, 2232 struct drm_plane *plane,
1263 unsigned long possible_crtcs, 2233 unsigned long possible_crtcs,
@@ -1543,7 +2513,7 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
1543/* Plane list iterator for legacy (overlay only) planes. */ 2513/* Plane list iterator for legacy (overlay only) planes. */
1544#define drm_for_each_legacy_plane(plane, dev) \ 2514#define drm_for_each_legacy_plane(plane, dev) \
1545 list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ 2515 list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
1546 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 2516 for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
1547 2517
1548#define drm_for_each_plane(plane, dev) \ 2518#define drm_for_each_plane(plane, dev) \
1549 list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) 2519 list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 3febb4b9fce9..4b37afa2b73b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -40,148 +40,7 @@
40#include <linux/fb.h> 40#include <linux/fb.h>
41 41
42#include <drm/drm_crtc.h> 42#include <drm/drm_crtc.h>
43 43#include <drm/drm_modeset_helper_vtables.h>
44enum mode_set_atomic {
45 LEAVE_ATOMIC_MODE_SET,
46 ENTER_ATOMIC_MODE_SET,
47};
48
49/**
50 * struct drm_crtc_helper_funcs - helper operations for CRTCs
51 * @dpms: set power state
52 * @prepare: prepare the CRTC, called before @mode_set
53 * @commit: commit changes to CRTC, called after @mode_set
54 * @mode_fixup: try to fixup proposed mode for this CRTC
55 * @mode_set: set this mode
56 * @mode_set_nofb: set mode only (no scanout buffer attached)
57 * @mode_set_base: update the scanout buffer
58 * @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
59 * @load_lut: load color palette
60 * @disable: disable CRTC when no longer in use
61 * @enable: enable CRTC
62 * @atomic_check: check for validity of an atomic state
63 * @atomic_begin: begin atomic update
64 * @atomic_flush: flush atomic update
65 *
66 * The helper operations are called by the mid-layer CRTC helper.
67 *
68 * Note that with atomic helpers @dpms, @prepare and @commit hooks are
69 * deprecated. Used @enable and @disable instead exclusively.
70 *
71 * With legacy crtc helpers there's a big semantic difference between @disable
72 * and the other hooks: @disable also needs to release any resources acquired in
73 * @mode_set (like shared PLLs).
74 */
75struct drm_crtc_helper_funcs {
76 /*
77 * Control power levels on the CRTC. If the mode passed in is
78 * unsupported, the provider must use the next lowest power level.
79 */
80 void (*dpms)(struct drm_crtc *crtc, int mode);
81 void (*prepare)(struct drm_crtc *crtc);
82 void (*commit)(struct drm_crtc *crtc);
83
84 /* Provider can fixup or change mode timings before modeset occurs */
85 bool (*mode_fixup)(struct drm_crtc *crtc,
86 const struct drm_display_mode *mode,
87 struct drm_display_mode *adjusted_mode);
88 /* Actually set the mode */
89 int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
90 struct drm_display_mode *adjusted_mode, int x, int y,
91 struct drm_framebuffer *old_fb);
92 /* Actually set the mode for atomic helpers, optional */
93 void (*mode_set_nofb)(struct drm_crtc *crtc);
94
95 /* Move the crtc on the current fb to the given position *optional* */
96 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
97 struct drm_framebuffer *old_fb);
98 int (*mode_set_base_atomic)(struct drm_crtc *crtc,
99 struct drm_framebuffer *fb, int x, int y,
100 enum mode_set_atomic);
101
102 /* reload the current crtc LUT */
103 void (*load_lut)(struct drm_crtc *crtc);
104
105 void (*disable)(struct drm_crtc *crtc);
106 void (*enable)(struct drm_crtc *crtc);
107
108 /* atomic helpers */
109 int (*atomic_check)(struct drm_crtc *crtc,
110 struct drm_crtc_state *state);
111 void (*atomic_begin)(struct drm_crtc *crtc,
112 struct drm_crtc_state *old_crtc_state);
113 void (*atomic_flush)(struct drm_crtc *crtc,
114 struct drm_crtc_state *old_crtc_state);
115};
116
117/**
118 * struct drm_encoder_helper_funcs - helper operations for encoders
119 * @dpms: set power state
120 * @save: save connector state
121 * @restore: restore connector state
122 * @mode_fixup: try to fixup proposed mode for this connector
123 * @prepare: part of the disable sequence, called before the CRTC modeset
124 * @commit: called after the CRTC modeset
125 * @mode_set: set this mode, optional for atomic helpers
126 * @get_crtc: return CRTC that the encoder is currently attached to
127 * @detect: connection status detection
128 * @disable: disable encoder when not in use (overrides DPMS off)
129 * @enable: enable encoder
130 * @atomic_check: check for validity of an atomic update
131 *
132 * The helper operations are called by the mid-layer CRTC helper.
133 *
134 * Note that with atomic helpers @dpms, @prepare and @commit hooks are
135 * deprecated. Used @enable and @disable instead exclusively.
136 *
137 * With legacy crtc helpers there's a big semantic difference between @disable
138 * and the other hooks: @disable also needs to release any resources acquired in
139 * @mode_set (like shared PLLs).
140 */
141struct drm_encoder_helper_funcs {
142 void (*dpms)(struct drm_encoder *encoder, int mode);
143 void (*save)(struct drm_encoder *encoder);
144 void (*restore)(struct drm_encoder *encoder);
145
146 bool (*mode_fixup)(struct drm_encoder *encoder,
147 const struct drm_display_mode *mode,
148 struct drm_display_mode *adjusted_mode);
149 void (*prepare)(struct drm_encoder *encoder);
150 void (*commit)(struct drm_encoder *encoder);
151 void (*mode_set)(struct drm_encoder *encoder,
152 struct drm_display_mode *mode,
153 struct drm_display_mode *adjusted_mode);
154 struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
155 /* detect for DAC style encoders */
156 enum drm_connector_status (*detect)(struct drm_encoder *encoder,
157 struct drm_connector *connector);
158 void (*disable)(struct drm_encoder *encoder);
159
160 void (*enable)(struct drm_encoder *encoder);
161
162 /* atomic helpers */
163 int (*atomic_check)(struct drm_encoder *encoder,
164 struct drm_crtc_state *crtc_state,
165 struct drm_connector_state *conn_state);
166};
167
168/**
169 * struct drm_connector_helper_funcs - helper operations for connectors
170 * @get_modes: get mode list for this connector
171 * @mode_valid: is this mode valid on the given connector? (optional)
172 * @best_encoder: return the preferred encoder for this connector
173 * @atomic_best_encoder: atomic version of @best_encoder
174 *
175 * The helper operations are called by the mid-layer CRTC helper.
176 */
177struct drm_connector_helper_funcs {
178 int (*get_modes)(struct drm_connector *connector);
179 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
180 struct drm_display_mode *mode);
181 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
182 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
183 struct drm_connector_state *connector_state);
184};
185 44
186extern void drm_helper_disable_unused_functions(struct drm_device *dev); 45extern void drm_helper_disable_unused_functions(struct drm_device *dev);
187extern int drm_crtc_helper_set_config(struct drm_mode_set *set); 46extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
@@ -197,25 +56,7 @@ extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
197extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); 56extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
198 57
199extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 58extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
200 struct drm_mode_fb_cmd2 *mode_cmd); 59 const struct drm_mode_fb_cmd2 *mode_cmd);
201
202static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
203 const struct drm_crtc_helper_funcs *funcs)
204{
205 crtc->helper_private = funcs;
206}
207
208static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
209 const struct drm_encoder_helper_funcs *funcs)
210{
211 encoder->helper_private = funcs;
212}
213
214static inline void drm_connector_helper_add(struct drm_connector *connector,
215 const struct drm_connector_helper_funcs *funcs)
216{
217 connector->helper_private = funcs;
218}
219 60
220extern void drm_helper_resume_force_mode(struct drm_device *dev); 61extern void drm_helper_resume_force_mode(struct drm_device *dev);
221 62
@@ -229,10 +70,6 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
229extern int drm_helper_probe_single_connector_modes(struct drm_connector 70extern int drm_helper_probe_single_connector_modes(struct drm_connector
230 *connector, uint32_t maxX, 71 *connector, uint32_t maxX,
231 uint32_t maxY); 72 uint32_t maxY);
232extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
233 *connector,
234 uint32_t maxX,
235 uint32_t maxY);
236extern void drm_kms_helper_poll_init(struct drm_device *dev); 73extern void drm_kms_helper_poll_init(struct drm_device *dev);
237extern void drm_kms_helper_poll_fini(struct drm_device *dev); 74extern void drm_kms_helper_poll_fini(struct drm_device *dev);
238extern bool drm_helper_hpd_irq_event(struct drm_device *dev); 75extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index bb9d0deca07c..1252108da0ef 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -455,16 +455,52 @@
455# define DP_EDP_14 0x03 455# define DP_EDP_14 0x03
456 456
457#define DP_EDP_GENERAL_CAP_1 0x701 457#define DP_EDP_GENERAL_CAP_1 0x701
458# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
459# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1)
460# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2)
461# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3)
462# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4)
463# define DP_EDP_FRC_ENABLE_CAP (1 << 5)
464# define DP_EDP_COLOR_ENGINE_CAP (1 << 6)
465# define DP_EDP_SET_POWER_CAP (1 << 7)
458 466
459#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 467#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
468# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0)
469# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1)
470# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2)
471# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3)
472# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4)
473# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5)
474# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6)
475# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7)
460 476
461#define DP_EDP_GENERAL_CAP_2 0x703 477#define DP_EDP_GENERAL_CAP_2 0x703
478# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
462 479
463#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ 480#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
481# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
482# define DP_EDP_X_REGION_CAP_SHIFT 0
483# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4)
484# define DP_EDP_Y_REGION_CAP_SHIFT 4
464 485
465#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 486#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
487# define DP_EDP_BACKLIGHT_ENABLE (1 << 0)
488# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1)
489# define DP_EDP_FRC_ENABLE (1 << 2)
490# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3)
491# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7)
466 492
467#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 493#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
494# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0)
495# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0)
496# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0)
497# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0)
498# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0)
499# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2)
500# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3)
501# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
502# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
503# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
468 504
469#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 505#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
470#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 506#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
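
These DPCD registers are accessed over the AUX channel; a hedged sketch switching brightness control to DPCD mode (aux is the panel's struct drm_dp_aux, error handling trimmed):

	u8 mode;

	if (drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
			      &mode) < 0)
		return;
	mode &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
	mode |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
	drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, mode);
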
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index c54cf3d4a03f..be62bd321e75 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -18,7 +18,7 @@ void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
18void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); 18void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
19 19
20struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, 20struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
21 struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd); 21 struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
22 22
23struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, 23struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
24 unsigned int plane); 24 unsigned int plane);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 87b090c4b730..d8a40dff0d1d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,6 +34,11 @@ struct drm_fb_helper;
34 34
35#include <linux/kgdb.h> 35#include <linux/kgdb.h>
36 36
37enum mode_set_atomic {
38 LEAVE_ATOMIC_MODE_SET,
39 ENTER_ATOMIC_MODE_SET,
40};
41
37struct drm_fb_offset { 42struct drm_fb_offset {
38 int x, y; 43 int x, y;
39}; 44};
@@ -74,25 +79,76 @@ struct drm_fb_helper_surface_size {
74 79
75/** 80/**
76 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library 81 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
77 * @gamma_set: Set the given gamma lut register on the given crtc.
78 * @gamma_get: Read the given gamma lut register on the given crtc, used to
79 * save the current lut when force-restoring the fbdev for e.g.
80 * kdbg.
81 * @fb_probe: Driver callback to allocate and initialize the fbdev info
82 * structure. Furthermore it also needs to allocate the drm
83 * framebuffer used to back the fbdev.
84 * @initial_config: Setup an initial fbdev display configuration
85 * 82 *
86 * Driver callbacks used by the fbdev emulation helper library. 83 * Driver callbacks used by the fbdev emulation helper library.
87 */ 84 */
88struct drm_fb_helper_funcs { 85struct drm_fb_helper_funcs {
86 /**
87 * @gamma_set:
88 *
89 * Set the given gamma LUT register on the given CRTC.
90 *
91 * This callback is optional.
92 *
93 * FIXME:
94 *
95 * This callback is functionally redundant with the core gamma table
96 * support and simply exists because the fbdev hasn't yet been
97 * refactored to use the core gamma table interfaces.
98 */
89 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, 99 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
90 u16 blue, int regno); 100 u16 blue, int regno);
101 /**
102 * @gamma_get:
103 *
104 * Read the given gamma LUT register on the given CRTC, used to save the
 105 * current LUT when force-restoring the fbdev for e.g. kgdb.
106 *
107 * This callback is optional.
108 *
109 * FIXME:
110 *
111 * This callback is functionally redundant with the core gamma table
112 * support and simply exists because the fbdev hasn't yet been
113 * refactored to use the core gamma table interfaces.
114 */
91 void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, 115 void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
92 u16 *blue, int regno); 116 u16 *blue, int regno);
93 117
118 /**
119 * @fb_probe:
120 *
121 * Driver callback to allocate and initialize the fbdev info structure.
122 * Furthermore it also needs to allocate the DRM framebuffer used to
123 * back the fbdev.
124 *
125 * This callback is mandatory.
126 *
127 * RETURNS:
128 *
129 * The driver should return 0 on success and a negative error code on
130 * failure.
131 */
94 int (*fb_probe)(struct drm_fb_helper *helper, 132 int (*fb_probe)(struct drm_fb_helper *helper,
95 struct drm_fb_helper_surface_size *sizes); 133 struct drm_fb_helper_surface_size *sizes);
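
A hedged @fb_probe sketch using the fbdev helper allocators; foo_fb_create_internal() is hypothetical:

	static int foo_fb_probe(struct drm_fb_helper *helper,
				struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info = drm_fb_helper_alloc_fbi(helper);

		if (IS_ERR(info))
			return PTR_ERR(info);

		/* hypothetical: allocate the backing DRM framebuffer */
		helper->fb = foo_fb_create_internal(helper->dev, sizes);
		if (IS_ERR(helper->fb))
			return PTR_ERR(helper->fb);

		drm_fb_helper_fill_fix(info, helper->fb->pitches[0],
				       helper->fb->depth);
		drm_fb_helper_fill_var(info, helper, sizes->fb_width,
				       sizes->fb_height);
		return 0;
	}
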
134
135 /**
136 * @initial_config:
137 *
138 * Driver callback to setup an initial fbdev display configuration.
139 * Drivers can use this callback to tell the fbdev emulation what the
140 * preferred initial configuration is. This is useful to implement
141 * smooth booting where the fbdev (and subsequently all userspace) never
142 * changes the mode, but always inherits the existing configuration.
143 *
144 * This callback is optional.
145 *
146 * RETURNS:
147 *
148 * The driver should return true if a suitable initial configuration has
149 * been filled out and false when the fbdev helper should fall back to
150 * the default probing logic.
151 */
96 bool (*initial_config)(struct drm_fb_helper *fb_helper, 152 bool (*initial_config)(struct drm_fb_helper *fb_helper,
97 struct drm_fb_helper_crtc **crtcs, 153 struct drm_fb_helper_crtc **crtcs,
98 struct drm_display_mode **modes, 154 struct drm_display_mode **modes,
@@ -105,18 +161,22 @@ struct drm_fb_helper_connector {
105}; 161};
106 162
107/** 163/**
108 * struct drm_fb_helper - helper to emulate fbdev on top of kms 164 * struct drm_fb_helper - main structure to emulate fbdev on top of KMS
109 * @fb: Scanout framebuffer object 165 * @fb: Scanout framebuffer object
110 * @dev: DRM device 166 * @dev: DRM device
111 * @crtc_count: number of possible CRTCs 167 * @crtc_count: number of possible CRTCs
112 * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) 168 * @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
113 * @connector_count: number of connected connectors 169 * @connector_count: number of connected connectors
114 * @connector_info_alloc_count: size of connector_info 170 * @connector_info_alloc_count: size of connector_info
171 * @connector_info: array of per-connector information
115 * @funcs: driver callbacks for fb helper 172 * @funcs: driver callbacks for fb helper
116 * @fbdev: emulated fbdev device info struct 173 * @fbdev: emulated fbdev device info struct
117 * @pseudo_palette: fake palette of 16 colors 174 * @pseudo_palette: fake palette of 16 colors
118 * @kernel_fb_list: list_head in kernel_fb_helper_list 175 *
119 * @delayed_hotplug: was there a hotplug while kms master active? 176 * This is the main structure used by the fbdev helpers. Drivers supporting
 177 * fbdev emulation should embed this into their overall driver structure.
178 * Drivers must also fill out a struct &drm_fb_helper_funcs with a few
179 * operations.
120 */ 180 */
121struct drm_fb_helper { 181struct drm_fb_helper {
122 struct drm_framebuffer *fb; 182 struct drm_framebuffer *fb;
@@ -129,10 +189,21 @@ struct drm_fb_helper {
129 const struct drm_fb_helper_funcs *funcs; 189 const struct drm_fb_helper_funcs *funcs;
130 struct fb_info *fbdev; 190 struct fb_info *fbdev;
131 u32 pseudo_palette[17]; 191 u32 pseudo_palette[17];
192
193 /**
194 * @kernel_fb_list:
195 *
196 * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit.
197 */
132 struct list_head kernel_fb_list; 198 struct list_head kernel_fb_list;
133 199
134 /* we got a hotplug but fbdev wasn't running the console 200 /**
135 delay until next set_par */ 201 * @delayed_hotplug:
202 *
203 * A hotplug was received while fbdev wasn't in control of the DRM
204 * device, i.e. another KMS master was active. The output configuration
 205 * needs to be reprobed when fbdev is in control again.
206 */
136 bool delayed_hotplug; 207 bool delayed_hotplug;
137 208
138 /** 209 /**
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 15e7f007380f..0b3e11ab8757 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,76 +35,129 @@
35 */ 35 */
36 36
37/** 37/**
38 * This structure defines the drm_mm memory object, which will be used by the 38 * struct drm_gem_object - GEM buffer object
39 * DRM for its buffer objects. 39 *
40 * This structure defines the generic parts for GEM buffer objects, which are
41 * mostly around handling mmap and userspace handles.
42 *
43 * Buffer objects are often abbreviated to BO.
40 */ 44 */
41struct drm_gem_object { 45struct drm_gem_object {
42 /** Reference count of this object */ 46 /**
47 * @refcount:
48 *
49 * Reference count of this object
50 *
51 * Please use drm_gem_object_reference() to acquire and
52 * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked()
53 * to release a reference to a GEM buffer object.
54 */
43 struct kref refcount; 55 struct kref refcount;
44 56
45 /** 57 /**
46 * handle_count - gem file_priv handle count of this object 58 * @handle_count:
59 *
60 * This is the GEM file_priv handle count of this object.
47 * 61 *
48 * Each handle also holds a reference. Note that when the handle_count 62 * Each handle also holds a reference. Note that when the handle_count
49 * drops to 0 any global names (e.g. the id in the flink namespace) will 63 * drops to 0 any global names (e.g. the id in the flink namespace) will
50 * be cleared. 64 * be cleared.
51 * 65 *
52 * Protected by dev->object_name_lock. 66 * Protected by dev->object_name_lock.
53 * */ 67 */
54 unsigned handle_count; 68 unsigned handle_count;
55 69
56 /** Related drm device */ 70 /**
71 * @dev: DRM dev this object belongs to.
72 */
57 struct drm_device *dev; 73 struct drm_device *dev;
58 74
59 /** File representing the shmem storage */ 75 /**
76 * @filp:
77 *
78 * SHMEM file node used as backing storage for swappable buffer objects.
79 * GEM also supports driver private objects with driver-specific backing
80 * storage (contiguous CMA memory, special reserved blocks). In this
81 * case @filp is NULL.
82 */
60 struct file *filp; 83 struct file *filp;
61 84
62 /* Mapping info for this object */ 85 /**
86 * @vma_node:
87 *
88 * Mapping info for this object to support mmap. Drivers are supposed to
89 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
90 * offset itself can be retrieved using drm_vma_node_offset_addr().
91 *
92 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
93 * that userspace is allowed to access the object.
94 */
63 struct drm_vma_offset_node vma_node; 95 struct drm_vma_offset_node vma_node;
64 96
65 /** 97 /**
98 * @size:
99 *
66 * Size of the object, in bytes. Immutable over the object's 100 * Size of the object, in bytes. Immutable over the object's
67 * lifetime. 101 * lifetime.
68 */ 102 */
69 size_t size; 103 size_t size;
70 104
71 /** 105 /**
106 * @name:
107 *
72 * Global name for this object, starts at 1. 0 means unnamed. 108 * Global name for this object, starts at 1. 0 means unnamed.
73 * Access is covered by the object_name_lock in the related drm_device 109 * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK
110 * and GEM_OPEN ioctls.
74 */ 111 */
75 int name; 112 int name;
76 113
77 /** 114 /**
78 * Memory domains. These monitor which caches contain read/write data 115 * @read_domains:
116 *
117 * Read memory domains. These monitor which caches contain read/write data
79 * related to the object. When transitioning from one set of domains 118 * related to the object. When transitioning from one set of domains
80 * to another, the driver is called to ensure that caches are suitably 119 * to another, the driver is called to ensure that caches are suitably
81 * flushed and invalidated 120 * flushed and invalidated.
82 */ 121 */
83 uint32_t read_domains; 122 uint32_t read_domains;
123
124 /**
125 * @write_domain: Corresponding unique write memory domain.
126 */
84 uint32_t write_domain; 127 uint32_t write_domain;
85 128
86 /** 129 /**
130 * @pending_read_domains:
131 *
87 * While validating an exec operation, the 132 * While validating an exec operation, the
88 * new read/write domain values are computed here. 133 * new read/write domain values are computed here.
89 * They will be transferred to the above values 134 * They will be transferred to the above values
90 * at the point that any cache flushing occurs 135 * at the point that any cache flushing occurs
91 */ 136 */
92 uint32_t pending_read_domains; 137 uint32_t pending_read_domains;
138
139 /**
140 * @pending_write_domain: Write domain similar to @pending_read_domains.
141 */
93 uint32_t pending_write_domain; 142 uint32_t pending_write_domain;
94 143
95 /** 144 /**
96 * dma_buf - dma buf associated with this GEM object 145 * @dma_buf:
146 *
147 * dma-buf associated with this GEM object.
97 * 148 *
98 * Pointer to the dma-buf associated with this gem object (either 149 * Pointer to the dma-buf associated with this gem object (either
99 * through importing or exporting). We break the resulting reference 150 * through importing or exporting). We break the resulting reference
100 * loop when the last gem handle for this object is released. 151 * loop when the last gem handle for this object is released.
101 * 152 *
102 * Protected by obj->object_name_lock 153 * Protected by obj->object_name_lock.
103 */ 154 */
104 struct dma_buf *dma_buf; 155 struct dma_buf *dma_buf;
105 156
106 /** 157 /**
107 * import_attach - dma buf attachment backing this object 158 * @import_attach:
159 *
160 * dma-buf attachment backing this object.
108 * 161 *
109 * Any foreign dma_buf imported as a gem object has this set to the 162 * Any foreign dma_buf imported as a gem object has this set to the
110 * attachment point for the device. This is invariant over the lifetime 163 * attachment point for the device. This is invariant over the lifetime
@@ -133,12 +186,30 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
133 struct vm_area_struct *vma); 186 struct vm_area_struct *vma);
134int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 187int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
135 188
189/**
190 * drm_gem_object_reference - acquire a GEM BO reference
191 * @obj: GEM buffer object
192 *
 193 * This acquires an additional reference to @obj. It is illegal to call this
194 * without already holding a reference. No locks required.
195 */
136static inline void 196static inline void
137drm_gem_object_reference(struct drm_gem_object *obj) 197drm_gem_object_reference(struct drm_gem_object *obj)
138{ 198{
139 kref_get(&obj->refcount); 199 kref_get(&obj->refcount);
140} 200}
141 201
202/**
203 * drm_gem_object_unreference - release a GEM BO reference
204 * @obj: GEM buffer object
205 *
206 * This releases a reference to @obj. Callers must hold the dev->struct_mutex
207 * lock when calling this function, even when the driver doesn't use
208 * dev->struct_mutex for anything.
209 *
210 * For drivers not encumbered with legacy locking use
211 * drm_gem_object_unreference_unlocked() instead.
212 */
142static inline void 213static inline void
143drm_gem_object_unreference(struct drm_gem_object *obj) 214drm_gem_object_unreference(struct drm_gem_object *obj)
144{ 215{
@@ -149,6 +220,13 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
149 } 220 }
150} 221}
151 222
223/**
224 * drm_gem_object_unreference_unlocked - release a GEM BO reference
225 * @obj: GEM buffer object
226 *
227 * This releases a reference to @obj. Callers must not hold the
228 * dev->struct_mutex lock when calling this function.
229 */
152static inline void 230static inline void
153drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 231drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
154{ 232{
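The asymmetry between the two unreference variants documented above is easy to get wrong. A minimal sketch of both call patterns, assuming a legacy driver that still serializes GEM teardown with dev->struct_mutex and a previously acquired reference held in obj:

/* Legacy path: dev->struct_mutex must be held. */
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);

/* Modern path: dev->struct_mutex must NOT be held. */
drm_gem_object_unreference_unlocked(obj);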
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0de6290df4da..fc65118e5077 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -148,8 +148,7 @@ static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
148 148
149static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) 149static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
150{ 150{
151 return list_entry(hole_node->node_list.next, 151 return list_next_entry(hole_node, node_list)->start;
152 struct drm_mm_node, node_list)->start;
153} 152}
154 153
155/** 154/**
@@ -180,6 +179,14 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
180 &(mm)->head_node.node_list, \ 179 &(mm)->head_node.node_list, \
181 node_list) 180 node_list)
182 181
182#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
183 for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
184 &entry->hole_stack != &(mm)->hole_stack ? \
185 hole_start = drm_mm_hole_node_start(entry), \
186 hole_end = drm_mm_hole_node_end(entry), \
187 1 : 0; \
188 entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
189
183/** 190/**
184 * drm_mm_for_each_hole - iterator to walk over all holes 191 * drm_mm_for_each_hole - iterator to walk over all holes
185 * @entry: drm_mm_node used internally to track progress 192 * @entry: drm_mm_node used internally to track progress
@@ -200,20 +207,7 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
200 * going backwards. 207 * going backwards.
201 */ 208 */
202#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ 209#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
203 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ 210 __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
204 &entry->hole_stack != &(mm)->hole_stack ? \
205 hole_start = drm_mm_hole_node_start(entry), \
206 hole_end = drm_mm_hole_node_end(entry), \
207 1 : 0; \
208 entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
209
210#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
211 for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
212 &entry->hole_stack != &(mm)->hole_stack ? \
213 hole_start = drm_mm_hole_node_start(entry), \
214 hole_end = drm_mm_hole_node_end(entry), \
215 1 : 0; \
216 entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
217 211
218/* 212/*
219 * Basic range manager support (drm_mm.c) 213 * Basic range manager support (drm_mm.c)
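As a usage sketch of the public iterator (a hypothetical debug loop; mm is assumed to be a pointer to an initialized struct drm_mm):

struct drm_mm_node *entry;
u64 hole_start, hole_end;

/* Visits every hole front to back; the macro fills in the extents
 * of the current hole on each iteration. */
drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
        pr_info("hole: 0x%llx-0x%llx\n",
                (unsigned long long)hole_start,
                (unsigned long long)hole_end);

Passing backwards=1 to the new __drm_mm_for_each_hole() walks the hole list in the opposite direction.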
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 08a8cac9e555..625966a906f2 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -35,46 +35,91 @@
35 * structures). 35 * structures).
36 */ 36 */
37 37
38/**
39 * enum drm_mode_status - hardware support status of a mode
40 * @MODE_OK: Mode OK
41 * @MODE_HSYNC: hsync out of range
42 * @MODE_VSYNC: vsync out of range
43 * @MODE_H_ILLEGAL: mode has illegal horizontal timings
44 * @MODE_V_ILLEGAL: mode has illegal vertical timings
45 * @MODE_BAD_WIDTH: requires an unsupported linepitch
46 * @MODE_NOMODE: no mode with a matching name
47 * @MODE_NO_INTERLACE: interlaced mode not supported
48 * @MODE_NO_DBLESCAN: doublescan mode not supported
49 * @MODE_NO_VSCAN: multiscan mode not supported
50 * @MODE_MEM: insufficient video memory
51 * @MODE_VIRTUAL_X: mode width too large for specified virtual size
52 * @MODE_VIRTUAL_Y: mode height too large for specified virtual size
53 * @MODE_MEM_VIRT: insufficient video memory given virtual size
54 * @MODE_NOCLOCK: no fixed clock available
55 * @MODE_CLOCK_HIGH: clock required is too high
56 * @MODE_CLOCK_LOW: clock required is too low
57 * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange
58 * @MODE_BAD_HVALUE: horizontal timing was out of range
59 * @MODE_BAD_VVALUE: vertical timing was out of range
60 * @MODE_BAD_VSCAN: VScan value out of range
61 * @MODE_HSYNC_NARROW: horizontal sync too narrow
62 * @MODE_HSYNC_WIDE: horizontal sync too wide
63 * @MODE_HBLANK_NARROW: horizontal blanking too narrow
64 * @MODE_HBLANK_WIDE: horizontal blanking too wide
65 * @MODE_VSYNC_NARROW: vertical sync too narrow
66 * @MODE_VSYNC_WIDE: vertical sync too wide
67 * @MODE_VBLANK_NARROW: vertical blanking too narrow
68 * @MODE_VBLANK_WIDE: vertical blanking too wide
69 * @MODE_PANEL: exceeds panel dimensions
70 * @MODE_INTERLACE_WIDTH: width too large for interlaced mode
71 * @MODE_ONE_WIDTH: only one width is supported
72 * @MODE_ONE_HEIGHT: only one height is supported
73 * @MODE_ONE_SIZE: only one resolution is supported
74 * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking
75 * @MODE_NO_STEREO: stereo modes not supported
76 * @MODE_STALE: mode has become stale
77 * @MODE_BAD: unspecified reason
78 * @MODE_ERROR: error condition
79 *
80 * This enum is used to filter out modes not supported by the driver/hardware
81 * combination.
82 */
38enum drm_mode_status { 83enum drm_mode_status {
39 MODE_OK = 0, /* Mode OK */ 84 MODE_OK = 0,
40 MODE_HSYNC, /* hsync out of range */ 85 MODE_HSYNC,
41 MODE_VSYNC, /* vsync out of range */ 86 MODE_VSYNC,
42 MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ 87 MODE_H_ILLEGAL,
43 MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ 88 MODE_V_ILLEGAL,
44 MODE_BAD_WIDTH, /* requires an unsupported linepitch */ 89 MODE_BAD_WIDTH,
45 MODE_NOMODE, /* no mode with a matching name */ 90 MODE_NOMODE,
46 MODE_NO_INTERLACE, /* interlaced mode not supported */ 91 MODE_NO_INTERLACE,
47 MODE_NO_DBLESCAN, /* doublescan mode not supported */ 92 MODE_NO_DBLESCAN,
48 MODE_NO_VSCAN, /* multiscan mode not supported */ 93 MODE_NO_VSCAN,
49 MODE_MEM, /* insufficient video memory */ 94 MODE_MEM,
50 MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ 95 MODE_VIRTUAL_X,
51 MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ 96 MODE_VIRTUAL_Y,
52 MODE_MEM_VIRT, /* insufficient video memory given virtual size */ 97 MODE_MEM_VIRT,
53 MODE_NOCLOCK, /* no fixed clock available */ 98 MODE_NOCLOCK,
54 MODE_CLOCK_HIGH, /* clock required is too high */ 99 MODE_CLOCK_HIGH,
55 MODE_CLOCK_LOW, /* clock required is too low */ 100 MODE_CLOCK_LOW,
56 MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ 101 MODE_CLOCK_RANGE,
57 MODE_BAD_HVALUE, /* horizontal timing was out of range */ 102 MODE_BAD_HVALUE,
58 MODE_BAD_VVALUE, /* vertical timing was out of range */ 103 MODE_BAD_VVALUE,
59 MODE_BAD_VSCAN, /* VScan value out of range */ 104 MODE_BAD_VSCAN,
60 MODE_HSYNC_NARROW, /* horizontal sync too narrow */ 105 MODE_HSYNC_NARROW,
61 MODE_HSYNC_WIDE, /* horizontal sync too wide */ 106 MODE_HSYNC_WIDE,
62 MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ 107 MODE_HBLANK_NARROW,
63 MODE_HBLANK_WIDE, /* horizontal blanking too wide */ 108 MODE_HBLANK_WIDE,
64 MODE_VSYNC_NARROW, /* vertical sync too narrow */ 109 MODE_VSYNC_NARROW,
65 MODE_VSYNC_WIDE, /* vertical sync too wide */ 110 MODE_VSYNC_WIDE,
66 MODE_VBLANK_NARROW, /* vertical blanking too narrow */ 111 MODE_VBLANK_NARROW,
67 MODE_VBLANK_WIDE, /* vertical blanking too wide */ 112 MODE_VBLANK_WIDE,
68 MODE_PANEL, /* exceeds panel dimensions */ 113 MODE_PANEL,
69 MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ 114 MODE_INTERLACE_WIDTH,
70 MODE_ONE_WIDTH, /* only one width is supported */ 115 MODE_ONE_WIDTH,
71 MODE_ONE_HEIGHT, /* only one height is supported */ 116 MODE_ONE_HEIGHT,
72 MODE_ONE_SIZE, /* only one resolution is supported */ 117 MODE_ONE_SIZE,
73 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ 118 MODE_NO_REDUCED,
74 MODE_NO_STEREO, /* stereo modes not supported */ 119 MODE_NO_STEREO,
75 MODE_UNVERIFIED = -3, /* mode needs to reverified */ 120 MODE_STALE = -3,
76 MODE_BAD = -2, /* unspecified reason */ 121 MODE_BAD = -2,
77 MODE_ERROR = -1 /* error condition */ 122 MODE_ERROR = -1
78}; 123};
79 124
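For illustration, these codes are what a connector's ->mode_valid() hook (see the probe helpers further below) returns to filter a mode list; a hedged sketch, where FOO_MAX_DOTCLOCK_KHZ is a hypothetical hardware limit:

static enum drm_mode_status
foo_mode_valid(struct drm_connector *connector,
               struct drm_display_mode *mode)
{
        if (mode->clock > FOO_MAX_DOTCLOCK_KHZ)
                return MODE_CLOCK_HIGH;   /* required clock too high */
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                return MODE_NO_INTERLACE; /* interlace not supported */
        return MODE_OK;
}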
80#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ 125#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
@@ -96,17 +141,125 @@ enum drm_mode_status {
96 141
97#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF 142#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
98 143
144/**
145 * struct drm_display_mode - DRM kernel-internal display mode structure
146 * @hdisplay: horizontal display size
147 * @hsync_start: horizontal sync start
148 * @hsync_end: horizontal sync end
149 * @htotal: horizontal total size
150 * @hskew: horizontal skew?!
151 * @vdisplay: vertical display size
152 * @vsync_start: vertical sync start
153 * @vsync_end: vertical sync end
154 * @vtotal: vertical total size
155 * @vscan: vertical scan?!
156 * @crtc_hdisplay: hardware mode horizontal display size
157 * @crtc_hblank_start: hardware mode horizontal blank start
158 * @crtc_hblank_end: hardware mode horizontal blank end
159 * @crtc_hsync_start: hardware mode horizontal sync start
160 * @crtc_hsync_end: hardware mode horizontal sync end
161 * @crtc_htotal: hardware mode horizontal total size
162 * @crtc_hskew: hardware mode horizontal skew?!
163 * @crtc_vdisplay: hardware mode vertical display size
164 * @crtc_vblank_start: hardware mode vertical blank start
165 * @crtc_vblank_end: hardware mode vertical blank end
166 * @crtc_vsync_start: hardware mode vertical sync start
167 * @crtc_vsync_end: hardware mode vertical sync end
168 * @crtc_vtotal: hardware mode vertical total size
169 *
170 * The horizontal and vertical timings are defined per the following diagram.
171 *
172 *
173 *              Active                 Front           Sync           Back
174 *              Region                 Porch                          Porch
175 *     <-----------------------><----------------><-------------><-------------->
176 *       //////////////////////|
177 *      ////////////////////// |
178 *     //////////////////////  |..................               ................
179 *                                                _______________
180 *     <----- [hv]display ----->
181 *     <------------- [hv]sync_start ------------>
182 *     <--------------------- [hv]sync_end --------------------->
183 *     <-------------------------------- [hv]total ----------------------------->*
184 *
185 * This structure contains two copies of timings. First are the plain timings,
186 * which specify the logical mode, as it would be for a progressive 1:1 scanout
187 * at the refresh rate userspace can observe through vblank timestamps. Then
188 * there's the hardware timings, which are corrected for interlacing,
189 * double-clocking and similar things. They are provided as a convenience, and
190 * can be appropriately computed using drm_mode_set_crtcinfo().
191 */
99struct drm_display_mode { 192struct drm_display_mode {
100 /* Header */ 193 /**
194 * @head:
195 *
196 * struct list_head for mode lists.
197 */
101 struct list_head head; 198 struct list_head head;
199
200 /**
201 * @base:
202 *
203 * A display mode is a normal modeset object, possibly including public
204 * userspace id.
205 *
206 * FIXME:
207 *
208 * This can probably be removed since the entire concept of userspace
209 * managing modes explicitly has never landed in upstream kernel mode
210 * setting support.
211 */
102 struct drm_mode_object base; 212 struct drm_mode_object base;
103 213
214 /**
215 * @name:
216 *
217 * Human-readable name of the mode, filled out with drm_mode_set_name().
218 */
104 char name[DRM_DISPLAY_MODE_LEN]; 219 char name[DRM_DISPLAY_MODE_LEN];
105 220
221 /**
222 * @status:
223 *
224 * Status of the mode, used to filter out modes not supported by the
225 * hardware. See enum &drm_mode_status.
226 */
106 enum drm_mode_status status; 227 enum drm_mode_status status;
228
229 /**
230 * @type:
231 *
232 * A bitmask of flags, mostly about the source of a mode. Possible flags
233 * are:
234 *
235 * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively
236 * unused.
237 * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
238 * resolution of an LCD panel. There should only be one preferred
239 * mode per connector at any given time.
240 * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
241 * them really. Drivers must set this bit for all modes they create
242 * and expose to userspace.
243 *
244 * Plus a big list of flags which shouldn't be used at all, but are
245 * still around since these flags are also used in the userspace ABI:
246 *
247 * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
248 * DRM_MODE_TYPE_PREFERRED instead.
249 * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
250 * which are stuck around for hysterical raisins only. No one has an
251 * idea what they were meant for. Don't use.
252 * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige
253 * from older kms designs where userspace had to first add a custom
254 * mode to the kernel's mode list before it could use it. Don't use.
255 */
107 unsigned int type; 256 unsigned int type;
108 257
109 /* Proposed mode values */ 258 /**
259 * @clock:
260 *
261 * Pixel clock in kHz.
262 */
110 int clock; /* in kHz */ 263 int clock; /* in kHz */
111 int hdisplay; 264 int hdisplay;
112 int hsync_start; 265 int hsync_start;
@@ -118,14 +271,74 @@ struct drm_display_mode {
118 int vsync_end; 271 int vsync_end;
119 int vtotal; 272 int vtotal;
120 int vscan; 273 int vscan;
274 /**
275 * @flags:
276 *
277 * Sync and timing flags:
278 *
279 * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high.
280 * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low.
281 * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high.
282 * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low.
283 * - DRM_MODE_FLAG_INTERLACE: mode is interlaced.
284 * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan.
285 * - DRM_MODE_FLAG_CSYNC: mode uses composite sync.
286 * - DRM_MODE_FLAG_PCSYNC: composite sync is active high.
287 * - DRM_MODE_FLAG_NCSYNC: composite sync is active low.
288 * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?).
289 * - DRM_MODE_FLAG_BCAST: not used?
290 * - DRM_MODE_FLAG_PIXMUX: not used?
291 * - DRM_MODE_FLAG_DBLCLK: double-clocked mode.
292 * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode.
293 *
294 * Additionally there's flags to specify how 3D modes are packed:
295 *
296 * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode.
297 * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right.
298 * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields.
299 * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines.
300 * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames.
301 * - DRM_MODE_FLAG_3D_L_DEPTH: ?
302 * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ?
303 * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom
304 * parts.
305 * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and
306 * right parts.
307 */
121 unsigned int flags; 308 unsigned int flags;
122 309
123 /* Addressable image size (may be 0 for projectors, etc.) */ 310 /**
311 * @width_mm:
312 *
313 * Addressable size of the output in mm, projectors should set this to
314 * 0.
315 */
124 int width_mm; 316 int width_mm;
317
318 /**
319 * @height_mm:
320 *
321 * Addressable size of the output in mm, projectors should set this to
322 * 0.
323 */
125 int height_mm; 324 int height_mm;
126 325
127 /* Actual mode we give to hw */ 326 /**
128 int crtc_clock; /* in KHz */ 327 * @crtc_clock:
328 *
329 * Actual pixel or dot clock in the hardware. This differs from the
330 * logical @clock when e.g. using interlacing, double-clocking, stereo
331 * modes or other fancy stuff that changes the timings and signals
332 * actually sent over the wire.
333 *
334 * This is again in kHz.
335 *
336 * Note that with digital outputs like HDMI or DP there's usually a
337 * massive confusion between the dot clock and the signal clock at the
338 * bit encoding level. Especially when an 8b/10b encoding is used and the
339 * difference is exactly a factor of 10.
340 */
341 int crtc_clock;
129 int crtc_hdisplay; 342 int crtc_hdisplay;
130 int crtc_hblank_start; 343 int crtc_hblank_start;
131 int crtc_hblank_end; 344 int crtc_hblank_end;
@@ -140,12 +353,48 @@ struct drm_display_mode {
140 int crtc_vsync_end; 353 int crtc_vsync_end;
141 int crtc_vtotal; 354 int crtc_vtotal;
142 355
143 /* Driver private mode info */ 356 /**
357 * @private:
358 *
359 * Pointer for driver private data. This can only be used for mode
360 * objects passed to drivers in modeset operations. It shouldn't be used
361 * by atomic drivers since they can store any additional data by
362 * subclassing state structures.
363 */
144 int *private; 364 int *private;
365
366 /**
367 * @private_flags:
368 *
369 * Similar to @private, but just an integer.
370 */
145 int private_flags; 371 int private_flags;
146 372
147 int vrefresh; /* in Hz */ 373 /**
148 int hsync; /* in kHz */ 374 * @vrefresh:
375 *
376 * Vertical refresh rate, for debug output in human readable form. Not
377 * used in a functional way.
378 *
379 * This value is in Hz.
380 */
381 int vrefresh;
382
383 /**
384 * @hsync:
385 *
386 * Horizontal refresh rate, for debug output in human readable form. Not
387 * used in a functional way.
388 *
389 * This value is in kHz.
390 */
391 int hsync;
392
393 /**
394 * @picture_aspect_ratio:
395 *
396 * Field for setting the HDMI picture aspect ratio of a mode.
397 */
149 enum hdmi_picture_aspect picture_aspect_ratio; 398 enum hdmi_picture_aspect picture_aspect_ratio;
150}; 399};
151 400
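To make the relationship between @clock and the totals concrete: the refresh rate is roughly clock * 1000 / (htotal * vtotal). A simplified sketch (the kernel's real helper is drm_mode_vrefresh(), which additionally corrects for interlace, doublescan and vscan):

/* Sketch only: ignores interlace, doublescan and vscan. */
static int foo_approx_vrefresh(const struct drm_display_mode *mode)
{
        if (!mode->htotal || !mode->vtotal)
                return 0;
        return DIV_ROUND_CLOSEST(mode->clock * 1000,
                                 mode->htotal * mode->vtotal);
}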
@@ -222,6 +471,8 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
222 const struct drm_display_mode *mode); 471 const struct drm_display_mode *mode);
223bool drm_mode_equal(const struct drm_display_mode *mode1, 472bool drm_mode_equal(const struct drm_display_mode *mode1,
224 const struct drm_display_mode *mode2); 473 const struct drm_display_mode *mode2);
474bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
475 const struct drm_display_mode *mode2);
225bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, 476bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
226 const struct drm_display_mode *mode2); 477 const struct drm_display_mode *mode2);
227 478
@@ -232,7 +483,7 @@ enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
232void drm_mode_prune_invalid(struct drm_device *dev, 483void drm_mode_prune_invalid(struct drm_device *dev,
233 struct list_head *mode_list, bool verbose); 484 struct list_head *mode_list, bool verbose);
234void drm_mode_sort(struct list_head *mode_list); 485void drm_mode_sort(struct list_head *mode_list);
235void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits); 486void drm_mode_connector_list_update(struct drm_connector *connector);
236 487
237/* parsing cmdline modes */ 488/* parsing cmdline modes */
238bool 489bool
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
new file mode 100644
index 000000000000..29e0dc50031d
--- /dev/null
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -0,0 +1,890 @@
1/*
2 * Copyright © 2006 Keith Packard
3 * Copyright © 2007-2008 Dave Airlie
4 * Copyright © 2007-2008 Intel Corporation
5 * Jesse Barnes <jesse.barnes@intel.com>
6 * Copyright © 2011-2013 Intel Corporation
7 * Copyright © 2015 Intel Corporation
8 * Daniel Vetter <daniel.vetter@ffwll.ch>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the "Software"),
12 * to deal in the Software without restriction, including without limitation
13 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14 * and/or sell copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef __DRM_MODESET_HELPER_VTABLES_H__
30#define __DRM_MODESET_HELPER_VTABLES_H__
31
32#include <drm/drm_crtc.h>
33
34/**
35 * DOC: overview
36 *
37 * The DRM mode setting helper functions are common code for drivers to use if
38 * they wish. Drivers are not forced to use this code in their
39 * implementations but it would be useful if the code they do use at least
40 * provides a consistent interface and operation to userspace. Therefore it is
41 * highly recommended to use the provided helpers as much as possible.
42 *
43 * Because there is only one pointer per modeset object to hold a vfunc table
44 * for helper libraries, they are by necessity shared among the different
45 * helpers.
46 *
47 * To make this clear, all the helper vtables are pulled together here.
48 */
49
50enum mode_set_atomic;
51
52/**
53 * struct drm_crtc_helper_funcs - helper operations for CRTCs
54 *
55 * These hooks are used by the legacy CRTC helpers, the transitional plane
56 * helpers and the new atomic modesetting helpers.
57 */
58struct drm_crtc_helper_funcs {
59 /**
60 * @dpms:
61 *
62 * Callback to control power levels on the CRTC. If the mode passed in
63 * is unsupported, the provider must use the next lowest power level.
64 * This is used by the legacy CRTC helpers to implement DPMS
65 * functionality in drm_helper_connector_dpms().
66 *
67 * This callback is also used to disable a CRTC by calling it with
68 * DRM_MODE_DPMS_OFF if the @disable hook isn't used.
69 *
70 * This callback is used by the legacy CRTC helpers. Atomic helpers
71 * also support using this hook for enabling and disabling a CRTC to
72 * facilitate transitions to atomic, but it is deprecated. Instead
73 * @enable and @disable should be used.
74 */
75 void (*dpms)(struct drm_crtc *crtc, int mode);
76
77 /**
78 * @prepare:
79 *
80 * This callback should prepare the CRTC for a subsequent modeset, which
81 * in practice means the driver should disable the CRTC if it is
82 * running. Most drivers ended up implementing this by calling their
83 * @dpms hook with DRM_MODE_DPMS_OFF.
84 *
85 * This callback is used by the legacy CRTC helpers. Atomic helpers
86 * also support using this hook for disabling a CRTC to facilitate
87 * transitions to atomic, but it is deprecated. Instead @disable should
88 * be used.
89 */
90 void (*prepare)(struct drm_crtc *crtc);
91
92 /**
93 * @commit:
94 *
95 * This callback should commit the new mode on the CRTC after a modeset,
96 * which in practice means the driver should enable the CRTC. Most
97 * drivers ended up implementing this by calling their @dpms hook with
98 * DRM_MODE_DPMS_ON.
99 *
100 * This callback is used by the legacy CRTC helpers. Atomic helpers
101 * also support using this hook for enabling a CRTC to facilitate
102 * transitions to atomic, but it is deprecated. Instead @enable should
103 * be used.
104 */
105 void (*commit)(struct drm_crtc *crtc);
106
107 /**
108 * @mode_fixup:
109 *
110 * This callback is used to validate a mode. The parameter mode is the
111 * display mode that userspace requested, adjusted_mode is the mode the
112 * encoders need to be fed with. Note that these are the inverse semantics
113 * of the &drm_encoder and &drm_bridge
114 * ->mode_fixup() functions. If the CRTC cannot support the requested
115 * conversion from mode to adjusted_mode, it should reject the modeset.
116 *
117 * This function is used by both legacy CRTC helpers and atomic helpers.
118 * With atomic helpers it is optional.
119 *
120 * NOTE:
121 *
122 * This function is called in the check phase of atomic modesets, which
123 * can be aborted for any reason (including on userspace's request to
124 * just check whether a configuration would be possible). Atomic drivers
125 * MUST NOT touch any persistent state (hardware or software) or data
126 * structures except the passed in adjusted_mode parameter.
127 *
128 * This is in contrast to the legacy CRTC helpers where this was
129 * allowed.
130 *
131 * Atomic drivers which need to inspect and adjust more state should
132 * instead use the @atomic_check callback.
133 *
134 * RETURNS:
135 *
136 * True if an acceptable configuration is possible, false if the modeset
137 * operation should be rejected.
138 */
139 bool (*mode_fixup)(struct drm_crtc *crtc,
140 const struct drm_display_mode *mode,
141 struct drm_display_mode *adjusted_mode);
142
143 /**
144 * @mode_set:
145 *
146 * This callback is used by the legacy CRTC helpers to set a new mode,
147 * position and framebuffer. Since it ties the primary plane to every
148 * mode change it is incompatible with universal plane support. And
149 * since it can't update other planes it's incompatible with atomic
150 * modeset support.
151 *
152 * This callback is only used by CRTC helpers and deprecated.
153 *
154 * RETURNS:
155 *
156 * 0 on success or a negative error code on failure.
157 */
158 int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
159 struct drm_display_mode *adjusted_mode, int x, int y,
160 struct drm_framebuffer *old_fb);
161
162 /**
163 * @mode_set_nofb:
164 *
165 * This callback is used to update the display mode of a CRTC without
166 * changing anything of the primary plane configuration. This fits the
167 * requirement of atomic and hence is used by the atomic helpers. It is
168 * also used by the transitional plane helpers to implement a
169 * @mode_set hook in drm_helper_crtc_mode_set().
170 *
171 * Note that the display pipe is completely off when this function is
172 * called. Atomic drivers which need hardware to be running before they
173 * program the new display mode (e.g. because they implement runtime PM)
174 * should not use this hook. This is because the helper library calls
175 * this hook only once per mode change and not every time the display
176 * pipeline is suspended using either DPMS or the new "ACTIVE" property.
177 * Which means register values set in this callback might get reset when
178 * the CRTC is suspended, but not restored. Such drivers should instead
179 * move all their CRTC setup into the @enable callback.
180 *
181 * This callback is optional.
182 */
183 void (*mode_set_nofb)(struct drm_crtc *crtc);
184
185 /**
186 * @mode_set_base:
187 *
188 * This callback is used by the legacy CRTC helpers to set a new
189 * framebuffer and scanout position. It is optional and used as an
190 * optimized fast-path instead of a full mode set operation with all the
191 * resulting flickering. Since it can't update other planes it's
192 * incompatible with atomic modeset support.
193 *
194 * This callback is only used by the CRTC helpers and deprecated.
195 *
196 * RETURNS:
197 *
198 * 0 on success or a negative error code on failure.
199 */
200 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
201 struct drm_framebuffer *old_fb);
202
203 /**
204 * @mode_set_base_atomic:
205 *
206 * This callback is used by the fbdev helpers to set a new framebuffer
207 * and scanout without sleeping, i.e. from an atomic calling context. It
208 * is only used to implement kgdb support.
209 *
210 * This callback is optional and only needed for kgdb support in the fbdev
211 * helpers.
212 *
213 * RETURNS:
214 *
215 * 0 on success or a negative error code on failure.
216 */
217 int (*mode_set_base_atomic)(struct drm_crtc *crtc,
218 struct drm_framebuffer *fb, int x, int y,
219 enum mode_set_atomic);
220
221 /**
222 * @load_lut:
223 *
224 * Load a LUT prepared with the @gamma_set functions from
225 * &drm_fb_helper_funcs.
226 *
227 * This callback is optional and is only used by the fbdev emulation
228 * helpers.
229 *
230 * FIXME:
231 *
232 * This callback is functionally redundant with the core gamma table
233 * support and simply exists because the fbdev hasn't yet been
234 * refactored to use the core gamma table interfaces.
235 */
236 void (*load_lut)(struct drm_crtc *crtc);
237
238 /**
239 * @disable:
240 *
241 * This callback should be used to disable the CRTC. With the atomic
242 * drivers it is called after all encoders connected to this CRTC have
243 * been shut off already using their own ->disable hook. If that
244 * sequence is too simple, drivers can just add their own hooks and call
245 * them from this CRTC callback by looping over all encoders
246 * connected to it using for_each_encoder_on_crtc().
247 *
248 * This hook is used both by legacy CRTC helpers and atomic helpers.
249 * Atomic drivers don't need to implement it if there's no need to
250 * disable anything at the CRTC level. To ensure that runtime PM
251 * handling (using either DPMS or the new "ACTIVE" property) works
252 * @disable must be the inverse of @enable for atomic drivers.
253 *
254 * NOTE:
255 *
256 * With legacy CRTC helpers there's a big semantic difference between
257 * @disable and other hooks (like @prepare or @dpms) used to shut down a
258 * CRTC: @disable is only called when also logically disabling the
259 * display pipeline and needs to release any resources acquired in
260 * @mode_set (like shared PLLs, or again release pinned framebuffers).
261 *
262 * Therefore @disable must be the inverse of @mode_set plus @commit for
263 * drivers still using legacy CRTC helpers, which is different from the
264 * rules under atomic.
265 */
266 void (*disable)(struct drm_crtc *crtc);
267
268 /**
269 * @enable:
270 *
271 * This callback should be used to enable the CRTC. With the atomic
272 * drivers it is called before all encoders connected to this CRTC are
273 * enabled through the encoder's own ->enable hook. If that sequence is
274 * too simple, drivers can just add their own hooks and call them from this
275 * CRTC callback by looping over all encoders connected to it using
276 * for_each_encoder_on_crtc().
277 *
278 * This hook is used only by atomic helpers, for symmetry with @disable.
279 * Atomic drivers don't need to implement it if there's no need to
280 * enable anything at the CRTC level. To ensure that runtime PM handling
281 * (using either DPMS or the new "ACTIVE" property) works
282 * @enable must be the inverse of @disable for atomic drivers.
283 */
284 void (*enable)(struct drm_crtc *crtc);
285
286 /**
287 * @atomic_check:
288 *
289 * Drivers should check plane-update related CRTC constraints in this
290 * hook. They can also check mode related limitations but need to be
291 * aware of the calling order, since this hook is used by
292 * drm_atomic_helper_check_planes() whereas the preparations needed to
293 * check output routing and the display mode is done in
294 * drm_atomic_helper_check_modeset(). Therefore drivers that want to
295 * check output routing and display mode constraints in this callback
296 * must ensure that drm_atomic_helper_check_modeset() has been called
297 * beforehand. This is the calling order used by the default helper
298 * implementation in drm_atomic_helper_check().
299 *
300 * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check()
301 * hooks are called after the ones for planes, which allows drivers to
302 * assign shared resources requested by planes in the CRTC callback
303 * here. For more complicated dependencies the driver can call the provided
304 * check helpers multiple times until the computed state has a final
305 * configuration and everything has been checked.
306 *
307 * This function is also allowed to inspect any other object's state and
308 * can add more state objects to the atomic commit if needed. Care must
309 * be taken though to ensure that state check&compute functions for
310 * these added states are all called, and derived state in other objects
311 * all updated. Again the recommendation is to just call check helpers
312 * until a maximal configuration is reached.
313 *
314 * This callback is used by the atomic modeset helpers and by the
315 * transitional plane helpers, but it is optional.
316 *
317 * NOTE:
318 *
319 * This function is called in the check phase of an atomic update. The
320 * driver is not allowed to change anything outside of the free-standing
321 * state objects passed-in or assembled in the overall &drm_atomic_state
322 * update tracking structure.
323 *
324 * RETURNS:
325 *
326 * 0 on success, -EINVAL if the state or the transition can't be
327 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
328 * attempt to obtain another state object ran into a &drm_modeset_lock
329 * deadlock.
330 */
331 int (*atomic_check)(struct drm_crtc *crtc,
332 struct drm_crtc_state *state);
333
334 /**
335 * @atomic_begin:
336 *
337 * Drivers should prepare for an atomic update of multiple planes on
338 * a CRTC in this hook. Depending upon hardware this might be vblank
339 * evasion, blocking updates by setting bits or doing preparatory work
340 * for e.g. manual update display.
341 *
342 * This hook is called before any plane commit functions are called.
343 *
344 * Note that the power state of the display pipe when this function is
345 * called depends upon the exact helpers and calling sequence the driver
346 * has picked. See drm_atomic_helper_commit_planes() for a discussion of the
347 * tradeoffs and variants of plane commit helpers.
348 *
349 * This callback is used by the atomic modeset helpers and by the
350 * transitional plane helpers, but it is optional.
351 */
352 void (*atomic_begin)(struct drm_crtc *crtc,
353 struct drm_crtc_state *old_crtc_state);
354 /**
355 * @atomic_flush:
356 *
357 * Drivers should finalize an atomic update of multiple planes on
358 * a CRTC in this hook. Depending upon hardware this might include
359 * checking that vblank evasion was successful, unblocking updates by
360 * setting bits or setting the GO bit to flush out all updates.
361 *
362 * Simple hardware or hardware with special requirements can commit and
363 * flush out all updates for all planes from this hook and forgo all the
364 * other commit hooks for plane updates.
365 *
366 * This hook is called after any plane commit functions are called.
367 *
368 * Note that the power state of the display pipe when this function is
369 * called depends upon the exact helpers and calling sequence the driver
370 * has picked. See drm_atomic_helper_commit_planes() for a discussion of the
371 * tradeoffs and variants of plane commit helpers.
372 *
373 * This callback is used by the atomic modeset helpers and by the
374 * transitional plane helpers, but it is optional.
375 */
376 void (*atomic_flush)(struct drm_crtc *crtc,
377 struct drm_crtc_state *old_crtc_state);
378};
379
380/**
381 * drm_crtc_helper_add - sets the helper vtable for a crtc
382 * @crtc: DRM CRTC
383 * @funcs: helper vtable to set for @crtc
384 */
385static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
386 const struct drm_crtc_helper_funcs *funcs)
387{
388 crtc->helper_private = funcs;
389}
390
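Putting the hooks together, an atomic driver would typically wire up only the atomic-relevant entries; a hedged sketch with hypothetical foo_crtc_* implementations:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
        .mode_set_nofb = foo_crtc_mode_set_nofb,
        .atomic_check  = foo_crtc_atomic_check,
        .atomic_begin  = foo_crtc_atomic_begin,
        .atomic_flush  = foo_crtc_atomic_flush,
        .enable        = foo_crtc_enable,
        .disable       = foo_crtc_disable, /* inverse of .enable */
};

/* In the driver's CRTC init path: */
drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);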
391/**
392 * struct drm_encoder_helper_funcs - helper operations for encoders
393 *
394 * These hooks are used by the legacy CRTC helpers, the transitional plane
395 * helpers and the new atomic modesetting helpers.
396 */
397struct drm_encoder_helper_funcs {
398 /**
399 * @dpms:
400 *
401 * Callback to control power levels on the encoder. If the mode passed in
402 * is unsupported, the provider must use the next lowest power level.
403 * This is used by the legacy encoder helpers to implement DPMS
404 * functionality in drm_helper_connector_dpms().
405 *
406 * This callback is also used to disable an encoder by calling it with
407 * DRM_MODE_DPMS_OFF if the @disable hook isn't used.
408 *
409 * This callback is used by the legacy CRTC helpers. Atomic helpers
410 * also support using this hook for enabling and disabling an encoder to
411 * facilitate transitions to atomic, but it is deprecated. Instead
412 * @enable and @disable should be used.
413 */
414 void (*dpms)(struct drm_encoder *encoder, int mode);
415
416 /**
417 * @mode_fixup:
418 *
419 * This callback is used to validate and adjust a mode. The parameter
420 * mode is the display mode that should be fed to the next element in
421 * the display chain, either the final &drm_connector or a &drm_bridge.
422 * The parameter adjusted_mode is the input mode the encoder requires. It
423 * can be modified by this callback and does not need to match mode.
424 *
425 * This function is used by both legacy CRTC helpers and atomic helpers.
426 * With atomic helpers it is optional.
427 *
428 * NOTE:
429 *
430 * This function is called in the check phase of atomic modesets, which
431 * can be aborted for any reason (including on userspace's request to
432 * just check whether a configuration would be possible). Atomic drivers
433 * MUST NOT touch any persistent state (hardware or software) or data
434 * structures except the passed in adjusted_mode parameter.
435 *
436 * This is in contrast to the legacy CRTC helpers where this was
437 * allowed.
438 *
439 * Atomic drivers which need to inspect and adjust more state should
440 * instead use the @atomic_check callback.
441 *
442 * RETURNS:
443 *
444 * True if an acceptable configuration is possible, false if the modeset
445 * operation should be rejected.
446 */
447 bool (*mode_fixup)(struct drm_encoder *encoder,
448 const struct drm_display_mode *mode,
449 struct drm_display_mode *adjusted_mode);
450
451 /**
452 * @prepare:
453 *
454 * This callback should prepare the encoder for a subsequent modeset,
455 * which in practice means the driver should disable the encoder if it
456 * is running. Most drivers ended up implementing this by calling their
457 * @dpms hook with DRM_MODE_DPMS_OFF.
458 *
459 * This callback is used by the legacy CRTC helpers. Atomic helpers
460 * also support using this hook for disabling an encoder to facilitate
461 * transitions to atomic, but it is deprecated. Instead @disable should
462 * be used.
463 */
464 void (*prepare)(struct drm_encoder *encoder);
465
466 /**
467 * @commit:
468 *
469 * This callback should commit the new mode on the encoder after a modeset,
470 * which in practice means the driver should enable the encoder. Most
471 * drivers ended up implementing this by calling their @dpms hook with
472 * DRM_MODE_DPMS_ON.
473 *
474 * This callback is used by the legacy CRTC helpers. Atomic helpers
475 * also support using this hook for enabling an encoder to facilitate
476 * transitions to atomic, but it is deprecated. Instead @enable should
477 * be used.
478 */
479 void (*commit)(struct drm_encoder *encoder);
480
481 /**
482 * @mode_set:
483 *
484 * This callback is used to update the display mode of an encoder.
485 *
486 * Note that the display pipe is completely off when this function is
487 * called. Drivers which need hardware to be running before they program
488 * the new display mode (because they implement runtime PM) should not
489 * use this hook, because the helper library calls it only once and not
490 * every time the display pipeline is suspended using either DPMS or the
491 * new "ACTIVE" property. Such drivers should instead move all their
492 * encoder setup into the ->enable() callback.
493 *
494 * This callback is used both by the legacy CRTC helpers and the atomic
495 * modeset helpers. It is optional in the atomic helpers.
496 */
497 void (*mode_set)(struct drm_encoder *encoder,
498 struct drm_display_mode *mode,
499 struct drm_display_mode *adjusted_mode);
500
501 /**
502 * @get_crtc:
503 *
504 * This callback is used by the legacy CRTC helpers to work around
505 * deficiencies in their own book-keeping.
506 *
507 * Do not use this; use the atomic helpers instead, which get the book-keeping
508 * right.
509 *
510 * FIXME:
511 *
512 * Currently only nouveau is using this, and as soon as nouveau is
513 * atomic we can ditch this hook.
514 */
515 struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
516
517 /**
518 * @detect:
519 *
520 * This callback can be used by drivers who want to do detection on the
521 * encoder object instead of in connector functions.
522 *
523 * It is not used by any helper and therefore has purely driver-specific
524 * semantics. New drivers shouldn't use this and instead just implement
525 * their own private callbacks.
526 *
527 * FIXME:
528 *
529 * This should just be converted into a pile of driver vfuncs.
530 * Currently radeon, amdgpu and nouveau are using it.
531 */
532 enum drm_connector_status (*detect)(struct drm_encoder *encoder,
533 struct drm_connector *connector);
534
535 /**
536 * @disable:
537 *
538 * This callback should be used to disable the encoder. With the atomic
539 * drivers it is called before this encoder's CRTC has been shut off
540 * using the CRTC's own ->disable hook. If that sequence is too simple
541 * drivers can just add their own driver private encoder hooks and call
542 * them from CRTC's callback by looping over all encoders connected to
543 * it using for_each_encoder_on_crtc().
544 *
545 * This hook is used both by legacy CRTC helpers and atomic helpers.
546 * Atomic drivers don't need to implement it if there's no need to
547 * disable anything at the encoder level. To ensure that runtime PM
548 * handling (using either DPMS or the new "ACTIVE" property) works
549 * @disable must be the inverse of @enable for atomic drivers.
550 *
551 * NOTE:
552 *
553 * With legacy CRTC helpers there's a big semantic difference between
554 * @disable and other hooks (like @prepare or @dpms) used to shut down an
555 * encoder: @disable is only called when also logically disabling the
556 * display pipeline and needs to release any resources acquired in
557 * @mode_set (like shared PLLs, or again release pinned framebuffers).
558 *
559 * Therefore @disable must be the inverse of @mode_set plus @commit for
560 * drivers still using legacy CRTC helpers, which is different from the
561 * rules under atomic.
562 */
563 void (*disable)(struct drm_encoder *encoder);
564
565 /**
566 * @enable:
567 *
568 * This callback should be used to enable the encoder. With the atomic
569 * drivers it is called after this encoder's CRTC has been enabled using
570 * the CRTC's own ->enable hook. If that sequence is too simple drivers
571 * can just add their own driver private encoder hooks and call them
572 * from CRTC's callback by looping over all encoders connected to it
573 * using for_each_encoder_on_crtc().
574 *
575 * This hook is used only by atomic helpers, for symmetry with @disable.
576 * Atomic drivers don't need to implement it if there's no need to
577 * enable anything at the encoder level. To ensure that runtime PM handling
578 * (using either DPMS or the new "ACTIVE" property) works
579 * @enable must be the inverse of @disable for atomic drivers.
580 */
581 void (*enable)(struct drm_encoder *encoder);
582
583 /**
584 * @atomic_check:
585 *
586 * This callback is used to validate encoder state for atomic drivers.
587 * Since the encoder is the object connecting the CRTC and connector it
588 * gets passed both states, to be able to validate interactions and
589 * update the CRTC to match what the encoder needs for the requested
590 * connector.
591 *
592 * This function is used by the atomic helpers, but it is optional.
593 *
594 * NOTE:
595 *
596 * This function is called in the check phase of an atomic update. The
597 * driver is not allowed to change anything outside of the free-standing
598 * state objects passed-in or assembled in the overall &drm_atomic_state
599 * update tracking structure.
600 *
601 * RETURNS:
602 *
603 * 0 on success, -EINVAL if the state or the transition can't be
604 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
605 * attempt to obtain another state object ran into a &drm_modeset_lock
606 * deadlock.
607 */
608 int (*atomic_check)(struct drm_encoder *encoder,
609 struct drm_crtc_state *crtc_state,
610 struct drm_connector_state *conn_state);
611};
612
613/**
614 * drm_encoder_helper_add - sets the helper vtable for an encoder
615 * @encoder: DRM encoder
616 * @funcs: helper vtable to set for @encoder
617 */
618static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
619 const struct drm_encoder_helper_funcs *funcs)
620{
621 encoder->helper_private = funcs;
622}
623
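Analogously to the CRTC case, an atomic driver's encoder vtable can stay small; a sketch with hypothetical foo_encoder_* hooks:

static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
        .atomic_check = foo_encoder_atomic_check,
        .enable       = foo_encoder_enable,
        .disable      = foo_encoder_disable, /* inverse of .enable */
};

drm_encoder_helper_add(encoder, &foo_encoder_helper_funcs);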
624/**
625 * struct drm_connector_helper_funcs - helper operations for connectors
626 *
627 * These functions are used by the atomic and legacy modeset helpers and by the
628 * probe helpers.
629 */
630struct drm_connector_helper_funcs {
631 /**
632 * @get_modes:
633 *
634 * This function should fill in all modes currently valid for the sink
635 * into the connector->probed_modes list. It should also update the
636 * EDID property by calling drm_mode_connector_update_edid_property().
637 *
638 * The usual way to implement this is to cache the EDID retrieved in the
639 * probe callback somewhere in the driver-private connector structure.
640 * In this function drivers then parse the modes in the EDID and add
641 * them by calling drm_add_edid_modes(). But connectors that drive a
642 * fixed panel can also manually add specific modes using
643 * drm_mode_probed_add(). Finally drivers that support audio probably
644 * want to update the ELD data, too, using drm_edid_to_eld().
645 *
646 * This function is only called after the ->detect() hook has indicated
647 * that a sink is connected and when the EDID isn't overridden through
648 * sysfs or the kernel commandline.
649 *
650 * This callback is used by the probe helpers in e.g.
651 * drm_helper_probe_single_connector_modes().
652 *
653 * RETURNS:
654 *
655 * The number of modes added by calling drm_mode_probed_add().
656 */
657 int (*get_modes)(struct drm_connector *connector);
658
659 /**
660 * @mode_valid:
661 *
662 * Callback to validate a mode for a connector, irrespective of the
663 * specific display configuration.
664 *
665 * This callback is used by the probe helpers to filter the mode list
666 * (which is usually derived from the EDID data block from the sink).
667 * See e.g. drm_helper_probe_single_connector_modes().
668 *
669 * NOTE:
670 *
671 * This only filters the mode list supplied to userspace in the
672 * GETCONNECTOR IOCTL. Userspace is free to create modes of its own and
673 * ask the kernel to use them. In this case the atomic helpers or legacy
674 * CRTC helpers will not call this function. Drivers therefore must
675 * still fully validate any mode passed in in a modeset request.
676 *
677 * RETURNS:
678 *
679 * Either MODE_OK or one of the failure reasons in enum
680 * &drm_mode_status.
681 */
682 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
683 struct drm_display_mode *mode);
684 /**
685 * @best_encoder:
686 *
687 * This function should select the best encoder for the given connector.
688 *
689 * This function is used by both the atomic helpers (in the
690 * drm_atomic_helper_check_modeset() function) and in the legacy CRTC
691 * helpers.
692 *
693 * NOTE:
694 *
695 * In atomic drivers this function is called in the check phase of an
696 * atomic update. The driver is not allowed to change or inspect
697 * anything outside of arguments passed-in. Atomic drivers which need to
698 * inspect dynamic configuration state should instead use
699 * @atomic_best_encoder.
700 *
701 * RETURNS:
702 *
703 * Encoder that should be used for the given connector and connector
704 * state, or NULL if no suitable encoder exists. Note that the helpers
705 * will ensure that encoders aren't used twice; drivers should not check
706 * for this.
707 */
708 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
709
710 /**
711 * @atomic_best_encoder:
712 *
713 * This is the atomic version of @best_encoder for atomic drivers which
714 * need to select the best encoder depending upon the desired
715 * configuration and can't select it statically.
716 *
717 * This function is used by drm_atomic_helper_check_modeset() and either
718 * this or @best_encoder is required.
719 *
720 * NOTE:
721 *
722 * This function is called in the check phase of an atomic update. The
723 * driver is not allowed to change anything outside of the free-standing
724 * state objects passed-in or assembled in the overall &drm_atomic_state
725 * update tracking structure.
726 *
727 * RETURNS:
728 *
729 * Encoder that should be used for the given connector and connector
730 * state, or NULL if no suitable encoder exists. Note that the helpers
731 * will ensure that encoders aren't used twice; drivers should not check
732 * for this.
733 */
734 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
735 struct drm_connector_state *connector_state);
736};
737
738/**
739 * drm_connector_helper_add - sets the helper vtable for a connector
740 * @connector: DRM connector
741 * @funcs: helper vtable to set for @connector
742 */
743static inline void drm_connector_helper_add(struct drm_connector *connector,
744 const struct drm_connector_helper_funcs *funcs)
745{
746 connector->helper_private = funcs;
747}
748
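A typical @get_modes implementation following the recipe above would look roughly like this (foo_connector, to_foo_connector() and the cached edid pointer are hypothetical driver-private details):

static int foo_connector_get_modes(struct drm_connector *connector)
{
        struct foo_connector *foo = to_foo_connector(connector);

        /* EDID was cached earlier by the driver's detect code. */
        drm_mode_connector_update_edid_property(connector, foo->edid);
        return drm_add_edid_modes(connector, foo->edid);
}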
749/**
750 * struct drm_plane_helper_funcs - helper operations for planes
751 *
752 * These functions are used by the atomic helpers and by the transitional plane
753 * helpers.
754 */
755struct drm_plane_helper_funcs {
756 /**
757 * @prepare_fb:
758 *
759 * This hook is to prepare a framebuffer for scanout by e.g. pinning
760 * its backing storage or relocating it into a contiguous block of
761 * VRAM. Other possible preparatory work includes flushing caches.
762 *
763 * This function must not block for outstanding rendering, since it is
764 * called in the context of the atomic IOCTL even for async commits to
765 * be able to return any errors to userspace. Instead the recommended
766 * way is to fill out the fence member of the passed-in
767 * &drm_plane_state. If the driver doesn't support native fences then
768 * equivalent functionality should be implemented through private
769 * members in the plane structure.
770 *
771 * The helpers will call @cleanup_fb with matching arguments for every
772 * successful call to this hook.
773 *
774 * This callback is used by the atomic modeset helpers and by the
775 * transitional plane helpers, but it is optional.
776 *
777 * RETURNS:
778 *
779 * 0 on success or one of the following negative error codes allowed by
780 * the atomic_commit hook in &drm_mode_config_funcs. When using helpers
781 * this callback is the only one which can fail an atomic commit,
782 * everything else must complete successfully.
783 */
784 int (*prepare_fb)(struct drm_plane *plane,
785 const struct drm_plane_state *new_state);
786 /**
787 * @cleanup_fb:
788 *
789 * This hook is called to clean up any resources allocated for the given
790 * framebuffer and plane configuration in @prepare_fb.
791 *
792 * This callback is used by the atomic modeset helpers and by the
793 * transitional plane helpers, but it is optional.
794 */
795 void (*cleanup_fb)(struct drm_plane *plane,
796 const struct drm_plane_state *old_state);
797
798 /**
799 * @atomic_check:
800 *
801 * Drivers should check plane specific constraints in this hook.
802 *
803 * When using drm_atomic_helper_check_planes() the planes' ->atomic_check()
804 * hooks are called before the ones for CRTCs, which allows drivers to
805 * request shared resources that the CRTC controls here. For more
806 * complicated dependencies the driver can call the provided check helpers
807 * multiple times until the computed state has a final configuration and
808 * everything has been checked.
809 *
810 * This function is also allowed to inspect any other object's state and
811 * can add more state objects to the atomic commit if needed. Care must
812 * be taken though to ensure that state check&compute functions for
813 * these added states are all called, and derived state in other objects
814 * all updated. Again the recommendation is to just call check helpers
815 * until a maximal configuration is reached.
816 *
817 * This callback is used by the atomic modeset helpers and by the
818 * transitional plane helpers, but it is optional.
819 *
820 * NOTE:
821 *
822 * This function is called in the check phase of an atomic update. The
823 * driver is not allowed to change anything outside of the free-standing
824 * state objects passed-in or assembled in the overall &drm_atomic_state
825 * update tracking structure.
826 *
827 * RETURNS:
828 *
829 * 0 on success, -EINVAL if the state or the transition can't be
830 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
831 * attempt to obtain another state object ran into a &drm_modeset_lock
832 * deadlock.
833 */
834 int (*atomic_check)(struct drm_plane *plane,
835 struct drm_plane_state *state);
836
837 /**
838 * @atomic_update:
839 *
840 * Drivers should use this function to update the plane state. This
841 * hook is called in-between the ->atomic_begin() and
842 * ->atomic_flush() of &drm_crtc_helper_funcs.
843 *
844 * Note that the power state of the display pipe when this function is
845 * called depends upon the exact helpers and calling sequence the driver
846 * has picked. See drm_atomic_helper_commit_planes() for a discussion of the
847 * tradeoffs and variants of plane commit helpers.
848 *
849 * This callback is used by the atomic modeset helpers and by the
850 * transitional plane helpers, but it is optional.
851 */
852 void (*atomic_update)(struct drm_plane *plane,
853 struct drm_plane_state *old_state);
854 /**
855 * @atomic_disable:
856 *
857 * Drivers should use this function to unconditionally disable a plane.
858 * This hook is called in-between the ->atomic_begin() and
859 * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to
860 * @atomic_update, which will be called for disabling planes, too, if
861 * the @atomic_disable hook isn't implemented.
862 *
863 * This hook is also useful to disable planes in preparation for a modeset,
864 * by calling drm_atomic_helper_disable_planes_on_crtc() from the
865 * ->disable() hook in &drm_crtc_helper_funcs.
866 *
867 * Note that the power state of the display pipe when this function is
868 * called depends upon the exact helpers and calling sequence the driver
869 * has picked. See drm_atomic_helper_commit_planes() for a discussion of the
870 * tradeoffs and variants of plane commit helpers.
871 *
872 * This callback is used by the atomic modeset helpers and by the
873 * transitional plane helpers, but it is optional.
874 */
875 void (*atomic_disable)(struct drm_plane *plane,
876 struct drm_plane_state *old_state);
877};
878
879/**
880 * drm_plane_helper_add - sets the helper vtable for a plane
881 * @plane: DRM plane
882 * @funcs: helper vtable to set for @plane
883 */
884static inline void drm_plane_helper_add(struct drm_plane *plane,
885 const struct drm_plane_helper_funcs *funcs)
886{
887 plane->helper_private = funcs;
888}
889
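A hedged sketch of a complete plane helper vtable, with hypothetical foo_plane_* hooks:

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
        .prepare_fb     = foo_plane_prepare_fb,  /* pin/flush backing storage */
        .cleanup_fb     = foo_plane_cleanup_fb,
        .atomic_check   = foo_plane_atomic_check,
        .atomic_update  = foo_plane_atomic_update,
        .atomic_disable = foo_plane_atomic_disable,
};

drm_plane_helper_add(plane, &foo_plane_helper_funcs);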
890#endif
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 94938d89347c..c5576fbcb909 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -138,7 +138,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
 struct drm_modeset_acquire_ctx *
 drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);

-int drm_modeset_lock_all_crtcs(struct drm_device *dev,
+int drm_modeset_lock_all_ctx(struct drm_device *dev,
 			       struct drm_modeset_acquire_ctx *ctx);

 #endif /* DRM_MODESET_LOCK_H_ */
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 5a7f9d4efb1d..4421f3f4ca8d 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,6 +26,7 @@

 #include <drm/drm_rect.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>

 /*
  * Drivers that don't allow primary plane scaling may pass this macro in place
@@ -36,46 +37,9 @@
  */
 #define DRM_PLANE_HELPER_NO_SCALING (1<<16)

-/**
- * DOC: plane helpers
- *
- * Helper functions to assist with creation and handling of CRTC primary
- * planes.
- */
-
 int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 		  const struct drm_crtc_funcs *funcs);

-/**
- * drm_plane_helper_funcs - helper operations for CRTCs
- * @prepare_fb: prepare a framebuffer for use by the plane
- * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
- * @atomic_check: check that a given atomic state is valid and can be applied
- * @atomic_update: apply an atomic state to the plane (mandatory)
- * @atomic_disable: disable the plane
- *
- * The helper operations are called by the mid-layer CRTC helper.
- */
-struct drm_plane_helper_funcs {
-	int (*prepare_fb)(struct drm_plane *plane,
-			  const struct drm_plane_state *new_state);
-	void (*cleanup_fb)(struct drm_plane *plane,
-			   const struct drm_plane_state *old_state);
-
-	int (*atomic_check)(struct drm_plane *plane,
-			    struct drm_plane_state *state);
-	void (*atomic_update)(struct drm_plane *plane,
-			      struct drm_plane_state *old_state);
-	void (*atomic_disable)(struct drm_plane *plane,
-			       struct drm_plane_state *old_state);
-};
-
-static inline void drm_plane_helper_add(struct drm_plane *plane,
-					const struct drm_plane_helper_funcs *funcs)
-{
-	plane->helper_private = funcs;
-}
-
 int drm_plane_helper_check_update(struct drm_plane *plane,
 				  struct drm_crtc *crtc,
 				  struct drm_framebuffer *fb,
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 26bb55e9e8b6..83bb156d4356 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -162,7 +162,8 @@ int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
 int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
 				 struct drm_rect *dst,
 				 int min_vscale, int max_vscale);
-void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
+void drm_rect_debug_print(const char *prefix,
+			  const struct drm_rect *r, bool fixed_point);
 void drm_rect_rotate(struct drm_rect *r,
 		     int width, int height,
 		     unsigned int rotation);
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 30d89e0da2c6..fab13851f95a 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -31,47 +31,80 @@
 #define MAX_PORTS 5

 /**
- * struct i915_audio_component_ops - callbacks defined in gfx driver
- * @owner: the module owner
- * @get_power: get the POWER_DOMAIN_AUDIO power well
- * @put_power: put the POWER_DOMAIN_AUDIO power well
- * @codec_wake_override: Enable/Disable generating the codec wake signal
- * @get_cdclk_freq: get the Core Display Clock in KHz
- * @sync_audio_rate: set n/cts based on the sample rate
+ * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
  */
 struct i915_audio_component_ops {
+	/**
+	 * @owner: i915 module
+	 */
 	struct module *owner;
+	/**
+	 * @get_power: get the POWER_DOMAIN_AUDIO power well
+	 *
+	 * Request the power well to be turned on.
+	 */
 	void (*get_power)(struct device *);
+	/**
+	 * @put_power: put the POWER_DOMAIN_AUDIO power well
+	 *
+	 * Allow the power well to be turned off.
+	 */
 	void (*put_power)(struct device *);
+	/**
+	 * @codec_wake_override: Enable/disable codec wake signal
+	 */
 	void (*codec_wake_override)(struct device *, bool enable);
+	/**
+	 * @get_cdclk_freq: Get the Core Display Clock in kHz
+	 */
 	int (*get_cdclk_freq)(struct device *);
+	/**
+	 * @sync_audio_rate: set n/cts based on the sample rate
+	 *
+	 * Called from audio driver. After audio driver sets the
+	 * sample rate, it will call this function to set n/cts
+	 */
 	int (*sync_audio_rate)(struct device *, int port, int rate);
 };

+/**
+ * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
+ */
 struct i915_audio_component_audio_ops {
+	/**
+	 * @audio_ptr: Pointer to be used in call to pin_eld_notify
+	 */
 	void *audio_ptr;
 	/**
-	 * Call from i915 driver, notifying the HDA driver that
-	 * pin sense and/or ELD information has changed.
-	 * @audio_ptr: HDA driver object
-	 * @port: Which port has changed (PORTA / PORTB / PORTC etc)
+	 * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
+	 *
+	 * Called when the i915 driver has set up audio pipeline or has just
+	 * begun to tear it down. This allows the HDA driver to update its
+	 * status accordingly (even when the HDA controller is in power save
+	 * mode).
 	 */
 	void (*pin_eld_notify)(void *audio_ptr, int port);
 };

 /**
- * struct i915_audio_component - used for audio video interaction
- * @dev: the device from gfx driver
- * @aud_sample_rate: the array of audio sample rate per port
- * @ops: callback for audio driver calling
- * @audio_ops: Call from i915 driver
+ * struct i915_audio_component - Used for direct communication between i915 and hda drivers
  */
 struct i915_audio_component {
+	/**
+	 * @dev: i915 device, used as parameter for ops
+	 */
 	struct device *dev;
+	/**
+	 * @aud_sample_rate: the array of audio sample rate per port
+	 */
 	int aud_sample_rate[MAX_PORTS];
-
+	/**
+	 * @ops: Ops implemented by i915 driver, called by hda driver
+	 */
 	const struct i915_audio_component_ops *ops;
-
+	/**
+	 * @audio_ops: Ops implemented by hda driver, called by i915 driver
+	 */
 	const struct i915_audio_component_audio_ops *audio_ops;
 };

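For illustration, a sketch of how the hda side might call these ops under the documented power-well contract; my_hda_read_cdclk() is hypothetical, while the component fields and ops are the ones declared above:

static int my_hda_read_cdclk(struct i915_audio_component *acomp)
{
	int cdclk_khz;

	if (!acomp || !acomp->ops)
		return -ENODEV;

	/* Hold the audio power well while querying display state. */
	acomp->ops->get_power(acomp->dev);
	cdclk_khz = acomp->ops->get_cdclk_freq(acomp->dev);
	acomp->ops->put_power(acomp->dev);

	return cdclk_khz;
}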
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 17c445612e01..f97020904717 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -279,16 +279,59 @@
 #define INTEL_SKL_GT3_IDS(info) \
 	INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
-	INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+	INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */

-#define INTEL_SKL_IDS(info) \
+#define INTEL_SKL_GT4_IDS(info) \
+	INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
+	INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
+	INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
+	INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */
+
+#define INTEL_SKL_IDS(info) \
 	INTEL_SKL_GT1_IDS(info), \
 	INTEL_SKL_GT2_IDS(info), \
-	INTEL_SKL_GT3_IDS(info)
+	INTEL_SKL_GT3_IDS(info), \
+	INTEL_SKL_GT4_IDS(info)

 #define INTEL_BXT_IDS(info) \
 	INTEL_VGA_DEVICE(0x0A84, info), \
 	INTEL_VGA_DEVICE(0x1A84, info), \
 	INTEL_VGA_DEVICE(0x5A84, info)

+#define INTEL_KBL_GT1_IDS(info)	\
+	INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
+	INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
+	INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
+	INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
+	INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
+	INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
+	INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
+	INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
+
+#define INTEL_KBL_GT2_IDS(info)	\
+	INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
+	INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
+	INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
+	INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
+	INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
+	INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
+	INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
+
+#define INTEL_KBL_GT3_IDS(info) \
+	INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
+	INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
+	INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+
+#define INTEL_KBL_GT4_IDS(info) \
+	INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
+	INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
+	INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
+	INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+
+#define INTEL_KBL_IDS(info) \
+	INTEL_KBL_GT1_IDS(info), \
+	INTEL_KBL_GT2_IDS(info), \
+	INTEL_KBL_GT3_IDS(info), \
+	INTEL_KBL_GT4_IDS(info)
+
 #endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 813042cede57..3d4bf08aa21f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -826,10 +826,10 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
  * reserved, the validation sequence is checked against the validation
  * sequence of the process currently reserving the buffer,
  * and if the current validation sequence is greater than that of the process
- * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
  * waiting for the buffer to become unreserved, after which it retries
  * reserving.
- * The caller should, when receiving an -EAGAIN error
+ * The caller should, when receiving an -EDEADLK error
  * release all its buffer reservations, wait for @bo to become unreserved, and
  * then rerun the validation with the same validation sequence. This procedure
  * will always guarantee that the process with the lowest validation sequence
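The backoff protocol this comment describes amounts to the loop below; my_reserve() and my_unreserve() are hypothetical stand-ins for the driver's actual reservation entry points:

static int my_reserve_pair(struct ttm_buffer_object *a,
			   struct ttm_buffer_object *b)
{
	int ret;

retry:
	ret = my_reserve(a);
	if (ret)
		return ret;

	ret = my_reserve(b);
	if (ret == -EDEADLK) {
		/* A lower validation sequence holds b: back off and retry. */
		my_unreserve(a);
		goto retry;
	}
	if (ret)
		my_unreserve(a);
	return ret;
}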
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9c747cb14ad8..d2f41477f8ae 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
 			       struct irq_phys_map *map, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 					   int virt_irq, int irq);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);

 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 054833939995..1991aea2ec4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
 }

 static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-				const char *name, const char *cells_name,
-				size_t index, struct acpi_reference_args *args)
+				const char *name, size_t index,
+				struct acpi_reference_args *args)
 {
 	return -ENXIO;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8d91f0..0169ba2e2e64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
 	unsigned long		virt_boundary_mask;

 	unsigned int		max_hw_sectors;
+	unsigned int		max_dev_sectors;
 	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     struct bio_set *bs, gfp_t gfp_mask,
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);

+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
@@ -958,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index de464e6683b6..83d1926c61e4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -40,6 +40,7 @@ struct bpf_map {
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
+	atomic_t usercnt;
 };

 struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);

-struct bpf_map *bpf_map_get(u32 ufd);
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
+void bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);

 extern int sysctl_unprivileged_bpf_disabled;
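A sketch of the new with_uref pairing as a pinning-style caller might use it; my_hold_map() is hypothetical:

static int my_hold_map(u32 ufd)
{
	struct bpf_map *map;

	/* Takes both the map reference and the user reference. */
	map = bpf_map_get_with_uref(ufd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... publish the map somewhere long-lived ... */

	/* Drops both references again. */
	bpf_map_put_with_uref(map);
	return 0;
}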
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a8a335b7fce0..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);

+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
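Usage sketch for the new default-group helpers, assuming a subsystem that wants one fixed child group; the my_* names are placeholders:

static struct config_group *my_defaults;

static int my_subsys_setup(struct configfs_subsystem *subsys,
			   struct config_item_type *type)
{
	my_defaults = configfs_register_default_group(&subsys->su_group,
						      "defaults", type);
	return PTR_ERR_OR_ZERO(my_defaults);
}

static void my_subsys_teardown(void)
{
	configfs_unregister_default_group(my_defaults);
}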
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ef4c5b1a860f..177c7680c1a8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -77,6 +77,7 @@ struct cpufreq_policy {
 	unsigned int		suspend_freq; /* freq to set during suspend */

 	unsigned int		policy; /* see above */
+	unsigned int		last_policy; /* policy before unplug */
 	struct cpufreq_governor	*governor; /* see below */
 	void			*governor_data;
 	bool			governor_enabled; /* governor start/stop flag */
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index cc92268af89a..6ac3cad9aef1 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -27,7 +27,7 @@
 #ifdef __KERNEL__

 extern int dns_query(const char *type, const char *name, size_t namelen,
-		     const char *options, char **_result, time_t *_expiry);
+		     const char *options, char **_result, time64_t *_expiry);

 #endif /* KERNEL */

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6523109e136d..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)

 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-	return gfp_flags & __GFP_DIRECT_RECLAIM;
+	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
 }

 #ifdef CONFIG_HIGHMEM
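The intended caller pattern for gfpflags_allow_blocking() is to pick between sleeping and atomic paths, roughly as in this sketch (my_pool_take() is hypothetical):

static DEFINE_MUTEX(my_pool_lock);

static void *my_pool_alloc(gfp_t gfp)
{
	void *obj;

	if (gfpflags_allow_blocking(gfp))
		mutex_lock(&my_pool_lock);		/* may sleep */
	else if (!mutex_trylock(&my_pool_lock))
		return NULL;				/* atomic caller */

	obj = my_pool_take();				/* hypothetical */
	mutex_unlock(&my_pool_lock);
	return obj;
}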
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0ef2a97ccdb5..402753bccafa 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
 	struct ipv6_ac_socklist	*ipv6_ac_list;
 	struct ipv6_fl_socklist __rcu *ipv6_fl_list;

-	struct ipv6_txoptions	*opt;
+	struct ipv6_txoptions __rcu	*opt;
 	struct sk_buff		*pktoptions;
 	struct sk_buff		*rxpmtu;
 	struct inet6_cork	cork;
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 484604d184be..e15828fd71f1 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,7 +19,6 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>

 struct kref {
 	atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
 	return kref_sub(kref, 1, release);
 }

-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception.  If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count. The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
-		void (*release)(struct kref *kref),
-		spinlock_t *lock)
-{
-	unsigned long flags;
-
-	WARN_ON(release == NULL);
-	if (atomic_add_unless(&kref->refcount, -1, 1))
-		return 0;
-	spin_lock_irqsave(lock, flags);
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		local_irq_restore(flags);
-		return 1;
-	}
-	spin_unlock_irqrestore(lock, flags);
-	return 0;
-}
-
 static inline int kref_put_mutex(struct kref *kref,
 				 void (*release)(struct kref *kref),
 				 struct mutex *lock)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5706a2108f0a..c923350ca20a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
 	     idx++)

+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		if (vcpu->vcpu_id == id)
+			return vcpu;
+	return NULL;
+}
+
 #define kvm_for_each_memslot(memslot, slots)	\
 	for (memslot = &slots->memslots[0];	\
 	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
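Unlike the index-based kvm_get_vcpu(), this helper matches the architectural vcpu_id, which is what guest-supplied identifiers (an MPIDR from PSCI CPU_ON, for instance) translate to. A usage sketch; my_kick_by_id() is hypothetical:

static int my_kick_by_id(struct kvm *kvm, int vcpu_id)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);

	if (!vcpu)
		return -EINVAL;
	kvm_vcpu_kick(vcpu);
	return 0;
}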
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 69c9057e1ab8..c6916aec43b6 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -58,7 +58,6 @@ enum {
 struct nvm_id_group {
 	u8	mtype;
 	u8	fmtype;
-	u16	res16;
 	u8	num_ch;
 	u8	num_lun;
 	u8	num_pln;
@@ -74,9 +73,9 @@ struct nvm_id_group {
 	u32	tbet;
 	u32	tbem;
 	u32	mpos;
+	u32	mccap;
 	u16	cpar;
-	u8	res[913];
-} __packed;
+};

 struct nvm_addr_format {
 	u8	ch_offset;
@@ -91,19 +90,15 @@ struct nvm_addr_format {
 	u8	pg_len;
 	u8	sect_offset;
 	u8	sect_len;
-	u8	res[4];
 };

 struct nvm_id {
 	u8	ver_id;
 	u8	vmnt;
 	u8	cgrps;
-	u8	res[5];
 	u32	cap;
 	u32	dom;
 	struct nvm_addr_format ppaf;
-	u8	ppat;
-	u8	resv[224];
 	struct nvm_id_group groups[4];
 } __packed;

@@ -123,39 +118,28 @@ struct nvm_tgt_instance {
 #define NVM_VERSION_MINOR 0
 #define NVM_VERSION_PATCH 0

-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (6)
-#define NVM_PG_BITS (16)
 #define NVM_BLK_BITS (16)
-#define NVM_LUN_BITS (10)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
 #define NVM_CH_BITS (8)

 struct ppa_addr {
+	/* Generic structure for all addresses */
 	union {
-		/* Channel-based PPA format in nand 4x2x2x2x8x10 */
-		struct {
-			u64 ch		: 4;
-			u64 sec		: 2; /* 4 sectors per page */
-			u64 pl		: 2; /* 4 planes per LUN */
-			u64 lun		: 2; /* 4 LUNs per channel */
-			u64 pg		: 8; /* 256 pages per block */
-			u64 blk		: 10;/* 1024 blocks per plane */
-			u64 resved	: 36;
-		} chnl;
-
-		/* Generic structure for all addresses */
 		struct {
+			u64 blk		: NVM_BLK_BITS;
+			u64 pg		: NVM_PG_BITS;
 			u64 sec		: NVM_SEC_BITS;
 			u64 pl		: NVM_PL_BITS;
-			u64 pg		: NVM_PG_BITS;
-			u64 blk		: NVM_BLK_BITS;
 			u64 lun		: NVM_LUN_BITS;
 			u64 ch		: NVM_CH_BITS;
 		} g;

 		u64 ppa;
 	};
-} __packed;
+};

 struct nvm_rq {
 	struct nvm_tgt_instance *ins;
@@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
 struct nvm_block;

 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
 typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
 typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
 				nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
 				nvm_bb_update_fn *, void *);
 typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
 typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
@@ -210,7 +194,7 @@ struct nvm_dev_ops {
 	nvm_id_fn		*identity;
 	nvm_get_l2p_tbl_fn	*get_l2p_tbl;
 	nvm_op_bb_tbl_fn	*get_bb_tbl;
-	nvm_op_set_bb_fn	*set_bb;
+	nvm_op_set_bb_fn	*set_bb_tbl;

 	nvm_submit_io_fn	*submit_io;
 	nvm_erase_blk_fn	*erase_block;
@@ -220,7 +204,7 @@ struct nvm_dev_ops {
 	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
 	nvm_dev_dma_free_fn	*dev_dma_free;

-	uint8_t			max_phys_sect;
+	unsigned int		max_phys_sect;
 };

 struct nvm_lun {
@@ -229,7 +213,9 @@ struct nvm_lun {
 	int lun_id;
 	int chnl_id;

+	unsigned int nr_inuse_blocks;	/* Number of used blocks */
 	unsigned int nr_free_blocks;	/* Number of unused blocks */
+	unsigned int nr_bad_blocks;	/* Number of bad blocks */
 	struct nvm_block *blocks;

 	spinlock_t lock;
@@ -263,8 +249,7 @@ struct nvm_dev {
 	int blks_per_lun;
 	int sec_size;
 	int oob_size;
-	int addr_mode;
-	struct nvm_addr_format addr_format;
+	struct nvm_addr_format ppaf;

 	/* Calculated/Cached values. These do not reflect the actual usable
 	 * blocks at run-time.
@@ -290,118 +275,45 @@ struct nvm_dev {
 	char name[DISK_NAME_LEN];
 };

-/* fallback conversion */
-static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
-						struct ppa_addr r)
-{
-	struct ppa_addr l;
-
-	l.ppa = r.g.sec +
-		r.g.pg * dev->sec_per_pg +
-		r.g.blk * (dev->pgs_per_blk *
-				dev->sec_per_pg) +
-		r.g.lun * (dev->blks_per_lun *
-				dev->pgs_per_blk *
-				dev->sec_per_pg) +
-		r.g.ch * (dev->blks_per_lun *
-				dev->pgs_per_blk *
-				dev->luns_per_chnl *
-				dev->sec_per_pg);
-
-	return l;
-}
-
-/* fallback conversion */
-static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
-						struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+						  struct ppa_addr r)
 {
 	struct ppa_addr l;
-	int secs, pgs, blks, luns;
-	sector_t ppa = r.ppa;

-	l.ppa = 0;
-
-	div_u64_rem(ppa, dev->sec_per_pg, &secs);
-	l.g.sec = secs;
-
-	sector_div(ppa, dev->sec_per_pg);
-	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
-	l.g.pg = pgs;
-
-	sector_div(ppa, dev->pgs_per_blk);
-	div_u64_rem(ppa, dev->blks_per_lun, &blks);
-	l.g.blk = blks;
-
-	sector_div(ppa, dev->blks_per_lun);
-	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
-	l.g.lun = luns;
-
-	sector_div(ppa, dev->luns_per_chnl);
-	l.g.ch = ppa;
+	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
+	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
+	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
+	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
+	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
+	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

 	return l;
 }

-static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
+						  struct ppa_addr r)
 {
 	struct ppa_addr l;

-	l.ppa = 0;
-
-	l.chnl.sec = r.g.sec;
-	l.chnl.pl = r.g.pl;
-	l.chnl.pg = r.g.pg;
-	l.chnl.blk = r.g.blk;
-	l.chnl.lun = r.g.lun;
-	l.chnl.ch = r.g.ch;
-
-	return l;
-}
-
-static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
-{
-	struct ppa_addr l;
-
-	l.ppa = 0;
-
-	l.g.sec = r.chnl.sec;
-	l.g.pl = r.chnl.pl;
-	l.g.pg = r.chnl.pg;
-	l.g.blk = r.chnl.blk;
-	l.g.lun = r.chnl.lun;
-	l.g.ch = r.chnl.ch;
+	/*
+	 * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
+	 */
+	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
+					(((1 << dev->ppaf.blk_len) - 1));
+	l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
+					(((1 << dev->ppaf.pg_len) - 1));
+	l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
+					(((1 << dev->ppaf.sect_len) - 1));
+	l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
+					(((1 << dev->ppaf.pln_len) - 1));
+	l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
+					(((1 << dev->ppaf.lun_len) - 1));
+	l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
+					(((1 << dev->ppaf.ch_len) - 1));

 	return l;
 }

-static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
-						struct ppa_addr gppa)
-{
-	switch (dev->addr_mode) {
-	case NVM_ADDRMODE_LINEAR:
-		return __linear_to_generic_addr(dev, gppa);
-	case NVM_ADDRMODE_CHANNEL:
-		return __chnl_to_generic_addr(gppa);
-	default:
-		BUG();
-	}
-	return gppa;
-}
-
-static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
-						struct ppa_addr gppa)
-{
-	switch (dev->addr_mode) {
-	case NVM_ADDRMODE_LINEAR:
-		return __generic_to_linear_addr(dev, gppa);
-	case NVM_ADDRMODE_CHANNEL:
-		return __generic_to_chnl_addr(gppa);
-	default:
-		BUG();
-	}
-	return gppa;
-}
-
 static inline int ppa_empty(struct ppa_addr ppa_addr)
 {
 	return (ppa_addr.ppa == ADDR_EMPTY);
@@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 							unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
+typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

 struct nvmm_type {
 	const char *name;
@@ -492,7 +404,7 @@ struct nvmm_type {
 	nvmm_get_lun_fn *get_lun;

 	/* Statistics */
-	nvmm_free_blocks_print_fn *free_blocks_print;
+	nvmm_lun_info_print_fn *lun_info_print;
 	struct list_head list;
 };

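A worked example of the conversion pair added above. Assuming a device whose ppaf reports, say, blk at bit offset 32 with length 16 and pg at offset 16 with length 16 (purely illustrative numbers), generic_to_dev_addr() shifts each g.* field to the device's offset and dev_to_generic_addr() masks it back out; my_ppa_roundtrip() is hypothetical:

static void my_ppa_roundtrip(struct nvm_dev *dev)
{
	struct ppa_addr g, d;

	g.ppa = 0;
	g.g.blk = 5;
	g.g.pg = 3;

	/* Pack into the device's own bit layout ... */
	d = generic_to_dev_addr(dev, g);
	/* ... and unpack again; the generic form is meant to round-trip. */
	g = dev_to_generic_addr(dev, d);
}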
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index e6982ac3200d..a57f0dfb6db7 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
 #define MARVELL_PHY_ID_88E1318S	0x01410e90
 #define MARVELL_PHY_ID_88E1116R	0x01410e40
 #define MARVELL_PHY_ID_88E1510	0x01410dd0
+#define MARVELL_PHY_ID_88E1540	0x01410eb0
 #define MARVELL_PHY_ID_88E3016	0x01410e60

 /* struct phy_device dev_flags definitions */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dd2097455a2e..1565324eb620 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         lro_cap[0x1];
 	u8         lro_psh_flag[0x1];
 	u8         lro_time_stamp[0x1];
-	u8         reserved_0[0x6];
+	u8         reserved_0[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_1[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_1[0x4];
+	u8         reserved_2[0x4];
 	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_2[0x3];
+	u8         reserved_3[0x3];
 	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_3[0x2];
+	u8         reserved_4[0x2];
 	u8         tunnel_statless_gre[0x1];
 	u8         tunnel_stateless_vxlan[0x1];

-	u8         reserved_4[0x20];
+	u8         reserved_5[0x20];

-	u8         reserved_5[0x10];
+	u8         reserved_6[0x10];
 	u8         lro_min_mss_size[0x10];

-	u8         reserved_6[0x120];
+	u8         reserved_7[0x120];

 	u8         lro_timer_supported_periods[4][0x20];

-	u8         reserved_7[0x600];
+	u8         reserved_8[0x600];
 };

 struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
 };

 struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8	   reserved[0x20];
+	u8	   reserved_0[0x20];

-	u8         reserved1[0x1f];
+	u8         reserved_1[0x1b];
+	u8         self_lb_en[0x1];
+	u8         reserved_2[0x3];
 	u8         lro[0x1];
 };

diff --git a/include/linux/net.h b/include/linux/net.h
index 70ac5e28e6b7..0b4ac7da583a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -34,8 +34,12 @@ struct inode;
 struct file;
 struct net;

-#define SOCK_ASYNC_NOSPACE	0
-#define SOCK_ASYNC_WAITDATA	1
+/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
+ * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
+ * Eventually all flags will be in sk->sk_wq_flags.
+ */
+#define SOCKWQ_ASYNC_NOSPACE	0
+#define SOCKWQ_ASYNC_WAITDATA	1
 #define SOCK_NOSPACE		2
 #define SOCK_PASSCRED		3
 #define SOCK_PASSSEC		4
@@ -89,6 +93,7 @@ struct socket_wq {
 	/* Note: wait MUST be first field of socket_wq */
 	wait_queue_head_t	wait;
 	struct fasync_struct	*fasync_list;
+	unsigned long		flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
 	struct rcu_head		rcu;
 } ____cacheline_aligned_in_smp;

@@ -96,7 +101,7 @@ struct socket_wq {
  *  struct socket - general BSD socket
  *  @state: socket state (%SS_CONNECTED, etc)
  *  @type: socket type (%SOCK_STREAM, etc)
- *  @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ *  @flags: socket flags (%SOCK_NOSPACE, etc)
  *  @ops: protocol specific socket operations
  *  @file: File back pointer for gc
  *  @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
 	SOCK_WAKE_URG,
 };

-int sock_wake_async(struct socket *sk, int how, int band);
+int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
 int sock_register(const struct net_proto_family *fam);
 void sock_unregister(int family);
 int __sock_create(struct net *net, int family, int type, int proto,
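With the flags relocated into the RCU-protected socket_wq, a protocol's write-space callback now dereferences the wq under rcu_read_lock() and hands it straight to sock_wake_async(); a sketch of that pattern (my_write_space() is hypothetical):

static void my_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq && test_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
		sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}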
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d20891465247..3b5d134e945a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1398,7 +1398,8 @@ enum netdev_priv_flags {
 *	@dma:		DMA channel
 *	@mtu:		Interface MTU value
 *	@type:		Interface hardware type
- *	@hard_header_len: Hardware header length
+ *	@hard_header_len: Hardware header length, which means that this is the
+ *			  minimum size of a packet.
 *
 *	@needed_headroom: Extra headroom the hardware may need, but not in all
 *			  cases can this be guaranteed
@@ -2068,20 +2069,23 @@ struct pcpu_sw_netstats {
 	struct u64_stats_sync   syncp;
 };

-#define netdev_alloc_pcpu_stats(type)				\
+#define __netdev_alloc_pcpu_stats(type, gfp)			\
 ({								\
-	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
+	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
 	if (pcpu_stats)	{					\
 		int __cpu;					\
 		for_each_possible_cpu(__cpu) {			\
 			typeof(type) *stat;			\
 			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
 			u64_stats_init(&stat->syncp);		\
 		}						\
 	}							\
 	pcpu_stats;						\
 })

+#define netdev_alloc_pcpu_stats(type)			\
+	__netdev_alloc_pcpu_stats(type, GFP_KERNEL);
+
 #include <linux/notifier.h>

 /* netdevice notifier chain. Please remember to update the rtnetlink
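The split lets callers that cannot sleep supply their own gfp mask, while the old macro keeps its GFP_KERNEL behaviour. A usage sketch with a hypothetical driver private struct:

struct my_priv {
	struct pcpu_sw_netstats __percpu *stats;
};

static int my_priv_init_atomic(struct my_priv *p)
{
	p->stats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats,
					     GFP_ATOMIC);
	return p->stats ? 0 : -ENOMEM;
}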
@@ -3854,6 +3858,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
 	return dev->priv_flags & IFF_EBRIDGE;
 }

+static inline bool netif_is_bridge_port(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_BRIDGE_PORT;
+}
+
 static inline bool netif_is_ovs_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_OPENVSWITCH;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 48bb01edcf30..0e1f433cc4b7 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
-			      size_t len);
+			      size_t len, size_t align);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 				 struct ip_set_ext *ext);

diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 187feabe557c..5fcd375ef175 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -5,10 +5,13 @@
 #include <linux/netdevice.h>

 #ifdef CONFIG_NETFILTER_INGRESS
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
 {
-	return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
-				   NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+#ifdef HAVE_JUMP_LABEL
+	if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+		return false;
+#endif
+	return !list_empty(&skb->dev->nf_hooks_ingress);
 }

 static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
 	struct nf_hook_state state;

 	nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
-			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
-			   skb->dev, NULL, dev_net(skb->dev), NULL);
+			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
+			   skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
 	return nf_hook_slow(skb, &state);
 }

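On the receive path the pair above is used roughly as in this sketch; with the static key patched out, the inactive case costs little more than a branch. nf_hook_slow() returns 1 when the packet should continue through the stack, so anything else means a hook consumed it (my_handle_ingress() is hypothetical):

static int my_handle_ingress(struct sk_buff *skb)
{
	int ret = 1;

	if (nf_hook_ingress_active(skb))
		ret = nf_hook_ingress(skb);

	/* ret == 1: keep processing; otherwise a hook consumed the skb. */
	return ret;
}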
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 570d630f98ae..11bbae44f4cb 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -251,6 +251,7 @@ struct nfs4_layoutget {
 	struct nfs4_layoutget_res res;
 	struct rpc_cred *cred;
 	gfp_t gfp_flags;
+	long timeout;
 };

 struct nfs4_getdeviceinfo_args {
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 36112cdd665a..b90d8ec57c1f 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
 						     const char *name)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }

 static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e828e7b4afec..6ae25aae88fd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -412,9 +412,18 @@ struct pci_host_bridge {
 	void (*release_fn)(struct pci_host_bridge *);
 	void *release_data;
 	unsigned int ignore_reset_delay:1;	/* for entire hierarchy */
+	/* Resource alignment requirements */
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+			const struct resource *res,
+			resource_size_t start,
+			resource_size_t size,
+			resource_size_t align);
 };

 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
+
 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
 		     void (*release_fn)(struct pci_host_bridge *),
 		     void *release_data);
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 80af3cd35ae4..72ce932c69b2 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -71,7 +71,7 @@ struct scpi_ops {
 	int (*sensor_get_value)(u16, u32 *);
 };

-#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL)
+#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
 struct scpi_ops *get_scpi_ops(void);
 #else
 static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e0392b5ac..92557bbce7e7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);

 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7c82e3b307a3..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,6 +158,24 @@ size_t ksize(const void *);
 #endif

 /*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
  * Kmalloc array related definitions
  */

@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */

-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);

 /*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;

 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					   gfp_t gfpflags,
-					   int node, size_t size);
+					   int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */

-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;

 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }

-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
 	struct rcu_head rcu;
 	struct kmem_cache *entries[0];
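With the bulk-allocation return type widened to int, callers can treat the result as a count, 0 meaning failure; the int return also leaves room for partial fills in later implementations. A sketch (my_fill_cache() is hypothetical):

static int my_fill_cache(struct kmem_cache *cache, void **objs, size_t want)
{
	/* Pointers returned here carry __assume_slab_alignment. */
	int got = kmem_cache_alloc_bulk(cache, GFP_KERNEL, want, objs);

	if (!got)
		return -ENOMEM;
	return got;
}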
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a156b82dd14c..c2b66a277e98 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
 asmlinkage long sys_lchown(const char __user *filename,
 				uid_t user, gid_t group);
 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 asmlinkage long sys_chown16(const char __user *filename,
 				old_uid_t user, old_gid_t group);
 asmlinkage long sys_lchown16(const char __user *filename,
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 4014a59828fc..613c29bd6baf 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister(
 static inline int thermal_zone_bind_cooling_device(
 	struct thermal_zone_device *tz, int trip,
 	struct thermal_cooling_device *cdev,
-	unsigned long upper, unsigned long lower)
+	unsigned long upper, unsigned long lower,
+	unsigned int weight)
 { return -ENODEV; }
 static inline int thermal_zone_unbind_cooling_device(
 	struct thermal_zone_device *tz, int trip,
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5b04b0a5375b..5e31f1b99037 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);

 /* tty_audit.c */
 #ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
			       size_t size, unsigned icanon);
 extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
 extern int tty_audit_push_current(void);
 #else
-static inline void tty_audit_add_data(struct tty_struct *tty,
-				      unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+				      size_t size, unsigned icanon)
 {
 }
 static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
diff --git a/include/linux/types.h b/include/linux/types.h
index 70d8500bddf1..70dd3dfde631 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;

 typedef unsigned long		uintptr_t;

-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 /* This is defined by include/asm-{arch}/posix_types.h */
 typedef __kernel_old_uid_t	old_uid_t;
 typedef __kernel_old_gid_t	old_gid_t;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b36d837c701e..2a91a0561a47 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -62,6 +62,7 @@ struct unix_sock {
 #define UNIX_GC_CANDIDATE	0
 #define UNIX_GC_MAYBE_CYCLE	1
	struct socket_wq	peer_wq;
+	wait_queue_t		peer_wake;
 };

 static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index aaf9700fc9e5..fb961a576abe 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)

 static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
-	if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+	if (rt->rt6i_flags & RTF_PCPU ||
+	    (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
		rt = (struct rt6_info *)(rt->dst.from);

	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2bfb2ad2fab1..877f682989b8 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 /*
  *	Store a destination cache entry in a socket
  */
-static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr)
 {
	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct rt6_info *rt = (struct rt6_info *) dst;

+	np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
	sk_setup_caps(sk, dst);
	np->daddr_cache = daddr;
 #ifdef CONFIG_IPV6_SUBTREES
	np->saddr_cache = saddr;
 #endif
-	np->dst_cookie = rt6_get_cookie(rt);
-}
-
-static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
-				 struct in6_addr *daddr, struct in6_addr *saddr)
-{
-	spin_lock(&sk->sk_dst_lock);
-	__ip6_dst_store(sk, dst, daddr, saddr);
-	spin_unlock(&sk->sk_dst_lock);
 }

 static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index aaee6fa02cf1..ff788b665277 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);

	if (net_xmit_eval(err) == 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index f6dafec9102c..62a750a6a8f8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
					struct pcpu_sw_netstats __percpu *stats)
 {
	if (err > 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += err;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
	} else if (err < 0) {
		err_stats->tx_errors++;
		err_stats->tx_aborted_errors++;
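Both tunnel hunks above replace this_cpu_ptr() with the get_cpu_ptr()/put_cpu_ptr() pair, which disables preemption around the statistics update so the task cannot migrate to another CPU between fetching the per-CPU pointer and writing through it. A rough user-space model of that discipline, with the preemption hooks stubbed out; every name below is illustrative, not a kernel symbol:

#include <stdio.h>

#define NR_CPUS 4

struct pcpu_stats_model {
	unsigned long long tx_packets;
	unsigned long long tx_bytes;
};

static struct pcpu_stats_model stats[NR_CPUS];
static int current_cpu;			/* stand-in for smp_processor_id() */

static void preempt_disable_stub(void) { }	/* no-op here; real in kernel */
static void preempt_enable_stub(void)  { }

static struct pcpu_stats_model *get_cpu_ptr_model(void)
{
	preempt_disable_stub();		/* pin the task to this CPU */
	return &stats[current_cpu];
}

static void put_cpu_ptr_model(void)
{
	preempt_enable_stub();		/* safe to migrate again */
}

int main(void)
{
	struct pcpu_stats_model *t = get_cpu_ptr_model();

	t->tx_packets++;		/* update cannot race with migration */
	t->tx_bytes += 1500;
	put_cpu_ptr_model();

	printf("cpu0: %llu pkts, %llu bytes\n",
	       stats[0].tx_packets, stats[0].tx_bytes);
	return 0;
}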
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e1a10b0ac0b0..9a5c9f013784 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
  */

 struct ipv6_txoptions {
+	atomic_t		refcnt;
	/* Length of this structure */
	int			tot_len;

@@ -217,7 +218,7 @@ struct ipv6_txoptions {
	struct ipv6_opt_hdr	*dst0opt;
	struct ipv6_rt_hdr	*srcrt;	/* Routing Header */
	struct ipv6_opt_hdr	*dst1opt;
-
+	struct rcu_head		rcu;
	/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
 };

@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
	struct rcu_head		rcu;
 };

+static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
+{
+	struct ipv6_txoptions *opt;
+
+	rcu_read_lock();
+	opt = rcu_dereference(np->opt);
+	if (opt && !atomic_inc_not_zero(&opt->refcnt))
+		opt = NULL;
+	rcu_read_unlock();
+	return opt;
+}
+
+static inline void txopt_put(struct ipv6_txoptions *opt)
+{
+	if (opt && atomic_dec_and_test(&opt->refcnt))
+		kfree_rcu(opt, rcu);
+}
+
 struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
@@ -490,6 +509,7 @@ struct ip6_create_arg {
	u32 user;
	const struct in6_addr *src;
	const struct in6_addr *dst;
+	int iif;
	u8 ecn;
 };

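The new refcnt/rcu fields and the txopt_get()/txopt_put() helpers above implement the usual RCU-plus-refcount lookup: a reader may only take a reference while the count is still non-zero, and the final put frees the object after a grace period. A minimal user-space sketch of the same rule, with C11 atomics standing in for the kernel primitives and a plain free() standing in for kfree_rcu():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct opts {
	atomic_int refcnt;
	int tot_len;
};

/* Model of atomic_inc_not_zero(): take a reference unless already dying. */
static int inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	return 0;
}

static void opts_put(struct opts *o)
{
	/* Model of atomic_dec_and_test() + kfree_rcu(): last ref frees. */
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

int main(void)
{
	struct opts *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcnt, 1);	/* creation reference */
	if (inc_not_zero(&o->refcnt))	/* reader takes a reference */
		printf("got ref, refcnt=%d\n", atomic_load(&o->refcnt));
	opts_put(o);			/* reader drops its reference */
	opts_put(o);			/* creation reference dropped; frees */
	return 0;
}

The inc-not-zero step is what closes the race between a reader finding the object under RCU and a writer dropping the last reference at the same moment.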
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82045fca388b..760bc4d5a2cf 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags {
  *	it shouldn't be set.
  *
  * @max_tx_aggregation_subframes: maximum number of subframes in an
- *	aggregate an HT driver will transmit, used by the peer as a
- *	hint to size its reorder buffer.
+ *	aggregate an HT driver will transmit. Though ADDBA will advertise
+ *	a constant value of 64 as some older APs can crash if the window
+ *	size is smaller (an example is LinkSys WRT120N with FW v1.0.07
+ *	build 002 Jun 18 2012).
  *
  * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
  *	(if %IEEE80211_HW_QUEUE_CONTROL is set)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index bf3937431030..2d8edaad29cb 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -181,8 +181,7 @@ void ndisc_cleanup(void);
 int ndisc_rcv(struct sk_buff *skb);

 void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr,
-		   struct sk_buff *oskb);
+		   const struct in6_addr *daddr, const struct in6_addr *saddr);

 void ndisc_send_rs(struct net_device *dev,
		   const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c9149cc0a02d..4bd7508bedc9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -618,6 +618,8 @@ struct nft_expr_ops {
	void				(*eval)(const struct nft_expr *expr,
						struct nft_regs *regs,
						const struct nft_pktinfo *pkt);
+	int				(*clone)(struct nft_expr *dst,
+						 const struct nft_expr *src);
	unsigned int			size;

	int				(*init)(const struct nft_ctx *ctx,
@@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
 int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
		  const struct nft_expr *expr);

-static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 {
+	int err;
+
	__module_get(src->ops->type->owner);
-	memcpy(dst, src, src->ops->size);
+	if (src->ops->clone) {
+		dst->ops = src->ops;
+		err = src->ops->clone(dst, src);
+		if (err < 0)
+			return err;
+	} else {
+		memcpy(dst, src, src->ops->size);
+	}
+	return 0;
 }

 /**
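nft_expr_clone() now lets stateful expressions supply a clone() callback for a deep copy and keeps the old memcpy() only as the fallback for stateless ones. A self-contained sketch of that callback-with-fallback pattern; the types and counter_clone() below are illustrative, not the netfilter ones:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct expr;

struct expr_ops {
	size_t size;
	int (*clone)(struct expr *dst, const struct expr *src);
};

struct expr {
	const struct expr_ops *ops;
	char *priv;			/* per-instance state, maybe heap */
};

static int counter_clone(struct expr *dst, const struct expr *src)
{
	dst->priv = strdup(src->priv);	/* deep copy of private state */
	return dst->priv ? 0 : -1;
}

static const struct expr_ops counter_ops = {
	.size = sizeof(struct expr),
	.clone = counter_clone,
};

static int expr_clone(struct expr *dst, const struct expr *src)
{
	if (src->ops->clone) {
		dst->ops = src->ops;
		return src->ops->clone(dst, src);
	}
	memcpy(dst, src, src->ops->size);	/* stateless: bitwise copy */
	return 0;
}

int main(void)
{
	struct expr a = { .ops = &counter_ops, .priv = strdup("state") };
	struct expr b;

	if (expr_clone(&b, &a) == 0)
		printf("cloned: %s (independent copy: %d)\n",
		       b.priv, b.priv != a.priv);
	free(a.priv);
	free(b.priv);
	return 0;
}

A bitwise copy of an object holding a pointer would leave two owners of one buffer; the hook is what prevents that for expressions with private state.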
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4c79ce8c1f92..b2a8e6338576 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -61,6 +61,9 @@ struct Qdisc {
				      */
 #define TCQ_F_WARN_NONWC	(1 << 16)
 #define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
+#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
+				      * qdisc_tree_decrease_qlen() should stop.
+				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 495c87e367b3..7bbb71081aeb 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,10 +775,10 @@ struct sctp_transport {
		hb_sent:1,

		/* Is the Path MTU update pending on this tranport */
-		pmtu_pending:1;
+		pmtu_pending:1,

	/* Has this transport moved the ctsn since we last sacked */
-	__u32 sack_generation;
+		sack_generation:1;
	u32 dst_cookie;

	struct flowi fl;
@@ -1482,19 +1482,19 @@ struct sctp_association {
		prsctp_capable:1,   /* Can peer do PR-SCTP? */
		auth_capable:1;     /* Is peer doing SCTP-AUTH? */

-	/* Ack State   : This flag indicates if the next received
+	/* sack_needed : This flag indicates if the next received
	 *             : packet is to be responded to with a
-	 *             : SACK. This is initializedto 0.  When a packet
-	 *             : is received it is incremented. If this value
+	 *             : SACK. This is initialized to 0.  When a packet
+	 *             : is received sack_cnt is incremented. If this value
	 *             : reaches 2 or more, a SACK is sent and the
	 *             : value is reset to 0. Note: This is used only
	 *             : when no DATA chunks are received out of
	 *             : order.  When DATA chunks are out of order,
	 *             : SACK's are not delayed (see Section 6).
	 */
-	__u8 sack_needed;     /* Do we need to sack the peer? */
+	__u8 sack_needed:1,     /* Do we need to sack the peer? */
+	     sack_generation:1;
	__u32 sack_cnt;
-	__u32 sack_generation;

	__u32 adaptation_ind;	/* Adaptation Code point. */

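The sctp hunks fold two full-width booleans (sack_needed, sack_generation) into one-bit bitfields that share storage with flags already present. A small sketch of the space effect; exact layout is compiler-dependent, the sizes noted are typical of x86-64 GCC, and the structs below are illustrative rather than the sctp ones:

#include <stdio.h>
#include <stdint.h>

struct before {
	uint8_t  sack_needed;		/* a whole byte for a boolean */
	uint32_t sack_cnt;
	uint32_t sack_generation;	/* a whole word for a boolean */
};

struct after {
	uint8_t  sack_needed:1,		/* both flags share one byte */
		 sack_generation:1;
	uint32_t sack_cnt;
};

int main(void)
{
	/* Typically prints "before: 12 bytes, after: 8 bytes". */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}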
diff --git a/include/net/sock.h b/include/net/sock.h
index bbf7c2cf15b4..52d27ee924f4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -254,7 +254,6 @@ struct cg_proto;
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early demux
  *	@sk_dst_cache: destination cache
- *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
@@ -384,14 +383,16 @@ struct sock {
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
-	struct socket_wq __rcu	*sk_wq;
-
+	union {
+		struct socket_wq __rcu	*sk_wq;
+		struct socket_wq	*sk_wq_raw;
+	};
 #ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
 #endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
-	spinlock_t		sk_dst_lock;
+	/* Note: 32bit hole on 64bit arches */
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
@@ -2005,10 +2006,27 @@ static inline unsigned long sock_wspace(struct sock *sk)
	return amt;
 }

-static inline void sk_wake_async(struct sock *sk, int how, int band)
+/* Note:
+ * We use sk->sk_wq_raw, from contexts knowing this
+ * pointer is not NULL and cannot disappear/change.
+ */
+static inline void sk_set_bit(int nr, struct sock *sk)
+{
+	set_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_clear_bit(int nr, struct sock *sk)
+{
+	clear_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_wake_async(const struct sock *sk, int how, int band)
 {
-	if (sock_flag(sk, SOCK_FASYNC))
-		sock_wake_async(sk->sk_socket, how, band);
+	if (sock_flag(sk, SOCK_FASYNC)) {
+		rcu_read_lock();
+		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
+		rcu_read_unlock();
+	}
 }

 /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
@@ -2226,6 +2244,31 @@ static inline bool sk_listener(const struct sock *sk)
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }

+/**
+ * sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with sk_state_store(). Used in places we do not hold socket lock :
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int sk_state_load(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+static inline void sk_state_store(struct sock *sk, int newstate)
+{
+	smp_store_release(&sk->sk_state, newstate);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
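sk_state_load() and sk_state_store() pair smp_load_acquire() with smp_store_release(), so a lockless reader that observes the new sk_state also observes every write made before the state was published. A user-space sketch of the same pairing using the C11 memory orderings those kernel primitives map onto; the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int sk_state;
static int sk_payload;		/* ordinary data guarded by the state */

static void state_store(int newstate)
{
	sk_payload = 42;	/* initialized before publication */
	atomic_store_explicit(&sk_state, newstate, memory_order_release);
}

static int state_load(void)
{
	return atomic_load_explicit(&sk_state, memory_order_acquire);
}

int main(void)
{
	state_store(1);
	if (state_load() == 1)	/* acquire pairs with the release */
		printf("payload=%d\n", sk_payload);
	return 0;
}

Run concurrently, the acquire load forbids the payload read from being reordered before the state check, which is exactly what the TCP readers listed in the kernel comment rely on.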
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index bc865e244efe..1d22ce9f352e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -323,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
					  struct net_device *filter_dev,
					  int idx)
 {
-	return -EOPNOTSUPP;
+	return idx;
 }

 static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ed527121031d..fcfa3d7f5e7e 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -668,6 +668,9 @@ struct Scsi_Host {
	unsigned use_blk_mq:1;
	unsigned use_cmd_list:1;

+	/* Host responded with short (<36 bytes) INQUIRY result */
+	unsigned short_inquiry:1;
+
	/*
	 * Optional work queue to be utilized by the transport
	 */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 7855cfe46b69..95a937eafb79 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -398,6 +398,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
 int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
			     const struct snd_soc_dapm_route *route, int num);
 void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
+void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm);

 /* dapm events */
 void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 0a2c74008e53..aabf0aca0171 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -474,7 +474,7 @@ struct se_cmd {
	struct completion	cmd_wait_comp;
	const struct target_core_fabric_ops *se_tfo;
	sense_reason_t		(*execute_cmd)(struct se_cmd *);
-	sense_reason_t		(*transport_complete_callback)(struct se_cmd *, bool);
+	sense_reason_t		(*transport_complete_callback)(struct se_cmd *, bool, int *);
	void			*protocol_data;

	unsigned char		*t_task_cdb;
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 38d437096c35..9355dd8eff3b 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -3,6 +3,7 @@ header-y += drm.h
 header-y += drm_fourcc.h
 header-y += drm_mode.h
 header-y += drm_sarea.h
+header-y += amdgpu_drm.h
 header-y += exynos_drm.h
 header-y += i810_drm.h
 header-y += i915_drm.h
@@ -17,4 +18,5 @@ header-y += tegra_drm.h
 header-y += via_drm.h
 header-y += vmwgfx_drm.h
 header-y += msm_drm.h
+header-y += vc4_drm.h
 header-y += virtgpu_drm.h
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index e52933a73580..453a76af123c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -76,19 +76,19 @@

 struct drm_amdgpu_gem_create_in  {
	/** the requested memory size */
-	uint64_t bo_size;
+	__u64 bo_size;
	/** physical start_addr alignment in bytes for some HW requirements */
-	uint64_t alignment;
+	__u64 alignment;
	/** the requested memory domains */
-	uint64_t domains;
+	__u64 domains;
	/** allocation flags */
-	uint64_t domain_flags;
+	__u64 domain_flags;
 };

 struct drm_amdgpu_gem_create_out {
	/** returned GEM object handle */
-	uint32_t handle;
-	uint32_t _pad;
+	__u32 handle;
+	__u32 _pad;
 };

 union drm_amdgpu_gem_create {
@@ -105,28 +105,28 @@ union drm_amdgpu_gem_create {

 struct drm_amdgpu_bo_list_in {
	/** Type of operation */
-	uint32_t operation;
+	__u32 operation;
	/** Handle of list or 0 if we want to create one */
-	uint32_t list_handle;
+	__u32 list_handle;
	/** Number of BOs in list */
-	uint32_t bo_number;
+	__u32 bo_number;
	/** Size of each element describing BO */
-	uint32_t bo_info_size;
+	__u32 bo_info_size;
	/** Pointer to array describing BOs */
-	uint64_t bo_info_ptr;
+	__u64 bo_info_ptr;
 };

 struct drm_amdgpu_bo_list_entry {
	/** Handle of BO */
-	uint32_t bo_handle;
+	__u32 bo_handle;
	/** New (if specified) BO priority to be used during migration */
-	uint32_t bo_priority;
+	__u32 bo_priority;
 };

 struct drm_amdgpu_bo_list_out {
	/** Handle of resource list */
-	uint32_t list_handle;
-	uint32_t _pad;
+	__u32 list_handle;
+	__u32 _pad;
 };

 union drm_amdgpu_bo_list {
@@ -150,26 +150,26 @@ union drm_amdgpu_bo_list {

 struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
-	uint32_t	op;
+	__u32	op;
	/** For future use, no flags defined so far */
-	uint32_t	flags;
-	uint32_t	ctx_id;
-	uint32_t	_pad;
+	__u32	flags;
+	__u32	ctx_id;
+	__u32	_pad;
 };

 union drm_amdgpu_ctx_out {
	struct {
-		uint32_t	ctx_id;
-		uint32_t	_pad;
+		__u32	ctx_id;
+		__u32	_pad;
	} alloc;

	struct {
		/** For future use, no flags defined so far */
-		uint64_t	flags;
+		__u64	flags;
		/** Number of resets caused by this context so far. */
-		uint32_t	hangs;
+		__u32	hangs;
		/** Reset status since the last call of the ioctl. */
-		uint32_t	reset_status;
+		__u32	reset_status;
	} state;
 };

@@ -189,12 +189,12 @@ union drm_amdgpu_ctx {
 #define AMDGPU_GEM_USERPTR_REGISTER	(1 << 3)

 struct drm_amdgpu_gem_userptr {
-	uint64_t	addr;
-	uint64_t	size;
+	__u64	addr;
+	__u64	size;
	/* AMDGPU_GEM_USERPTR_* */
-	uint32_t	flags;
+	__u32	flags;
	/* Resulting GEM handle */
-	uint32_t	handle;
+	__u32	handle;
 };

 /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
@@ -226,28 +226,28 @@ struct drm_amdgpu_gem_userptr {
 /** The same structure is shared for input/output */
 struct drm_amdgpu_gem_metadata {
	/** GEM Object handle */
-	uint32_t	handle;
+	__u32	handle;
	/** Do we want get or set metadata */
-	uint32_t	op;
+	__u32	op;
	struct {
		/** For future use, no flags defined so far */
-		uint64_t	flags;
+		__u64	flags;
		/** family specific tiling info */
-		uint64_t	tiling_info;
-		uint32_t	data_size_bytes;
-		uint32_t	data[64];
+		__u64	tiling_info;
+		__u32	data_size_bytes;
+		__u32	data[64];
	} data;
 };

 struct drm_amdgpu_gem_mmap_in {
	/** the GEM object handle */
-	uint32_t handle;
-	uint32_t _pad;
+	__u32 handle;
+	__u32 _pad;
 };

 struct drm_amdgpu_gem_mmap_out {
	/** mmap offset from the vma offset manager */
-	uint64_t addr_ptr;
+	__u64 addr_ptr;
 };

 union drm_amdgpu_gem_mmap {
@@ -257,18 +257,18 @@ union drm_amdgpu_gem_mmap {

 struct drm_amdgpu_gem_wait_idle_in {
	/** GEM object handle */
-	uint32_t handle;
+	__u32 handle;
	/** For future use, no flags defined so far */
-	uint32_t flags;
+	__u32 flags;
	/** Absolute timeout to wait */
-	uint64_t timeout;
+	__u64 timeout;
 };

 struct drm_amdgpu_gem_wait_idle_out {
	/** BO status: 0 - BO is idle, 1 - BO is busy */
-	uint32_t status;
+	__u32 status;
	/** Returned current memory domain */
-	uint32_t domain;
+	__u32 domain;
 };

 union drm_amdgpu_gem_wait_idle {
@@ -278,18 +278,18 @@ union drm_amdgpu_gem_wait_idle {

 struct drm_amdgpu_wait_cs_in {
	/** Command submission handle */
-	uint64_t handle;
+	__u64 handle;
	/** Absolute timeout to wait */
-	uint64_t timeout;
-	uint32_t ip_type;
-	uint32_t ip_instance;
-	uint32_t ring;
-	uint32_t ctx_id;
+	__u64 timeout;
+	__u32 ip_type;
+	__u32 ip_instance;
+	__u32 ring;
+	__u32 ctx_id;
 };

 struct drm_amdgpu_wait_cs_out {
	/** CS status: 0 - CS completed, 1 - CS still busy */
-	uint64_t status;
+	__u64 status;
 };

 union drm_amdgpu_wait_cs {
@@ -303,11 +303,11 @@ union drm_amdgpu_wait_cs {
 /* Sets or returns a value associated with a buffer. */
 struct drm_amdgpu_gem_op {
	/** GEM object handle */
-	uint32_t	handle;
+	__u32	handle;
	/** AMDGPU_GEM_OP_* */
-	uint32_t	op;
+	__u32	op;
	/** Input or return value */
-	uint64_t	value;
+	__u64	value;
 };

 #define AMDGPU_VA_OP_MAP			1
@@ -326,18 +326,18 @@ struct drm_amdgpu_gem_op {

 struct drm_amdgpu_gem_va {
	/** GEM object handle */
-	uint32_t handle;
-	uint32_t _pad;
+	__u32 handle;
+	__u32 _pad;
	/** AMDGPU_VA_OP_* */
-	uint32_t operation;
+	__u32 operation;
	/** AMDGPU_VM_PAGE_* */
-	uint32_t flags;
+	__u32 flags;
	/** va address to assign . Must be correctly aligned.*/
-	uint64_t va_address;
+	__u64 va_address;
	/** Specify offset inside of BO to assign. Must be correctly aligned.*/
-	uint64_t offset_in_bo;
+	__u64 offset_in_bo;
	/** Specify mapping size. Must be correctly aligned. */
-	uint64_t map_size;
+	__u64 map_size;
 };

 #define AMDGPU_HW_IP_GFX          0
@@ -354,24 +354,24 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03

 struct drm_amdgpu_cs_chunk {
-	uint32_t	chunk_id;
-	uint32_t	length_dw;
-	uint64_t	chunk_data;
+	__u32	chunk_id;
+	__u32	length_dw;
+	__u64	chunk_data;
 };

 struct drm_amdgpu_cs_in {
	/** Rendering context id */
-	uint32_t	ctx_id;
+	__u32	ctx_id;
	/** Handle of resource list associated with CS */
-	uint32_t	bo_list_handle;
-	uint32_t	num_chunks;
-	uint32_t	_pad;
-	/** this points to uint64_t * which point to cs chunks */
-	uint64_t	chunks;
+	__u32	bo_list_handle;
+	__u32	num_chunks;
+	__u32	_pad;
+	/** this points to __u64 * which point to cs chunks */
+	__u64	chunks;
 };

 struct drm_amdgpu_cs_out {
-	uint64_t handle;
+	__u64 handle;
 };

 union drm_amdgpu_cs {
@@ -388,32 +388,32 @@ union drm_amdgpu_cs {
 #define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

 struct drm_amdgpu_cs_chunk_ib {
-	uint32_t _pad;
+	__u32 _pad;
	/** AMDGPU_IB_FLAG_* */
-	uint32_t flags;
+	__u32 flags;
	/** Virtual address to begin IB execution */
-	uint64_t va_start;
+	__u64 va_start;
	/** Size of submission */
-	uint32_t ib_bytes;
+	__u32 ib_bytes;
	/** HW IP to submit to */
-	uint32_t ip_type;
+	__u32 ip_type;
	/** HW IP index of the same type to submit to */
-	uint32_t ip_instance;
+	__u32 ip_instance;
	/** Ring index to submit to */
-	uint32_t ring;
+	__u32 ring;
 };

 struct drm_amdgpu_cs_chunk_dep {
-	uint32_t ip_type;
-	uint32_t ip_instance;
-	uint32_t ring;
-	uint32_t ctx_id;
-	uint64_t handle;
+	__u32 ip_type;
+	__u32 ip_instance;
+	__u32 ring;
+	__u32 ctx_id;
+	__u64 handle;
 };

 struct drm_amdgpu_cs_chunk_fence {
-	uint32_t handle;
-	uint32_t offset;
+	__u32 handle;
+	__u32 offset;
 };

 struct drm_amdgpu_cs_chunk_data {
@@ -486,83 +486,83 @@ struct drm_amdgpu_cs_chunk_data {
 /* Input structure for the INFO ioctl */
 struct drm_amdgpu_info {
	/* Where the return value will be stored */
-	uint64_t return_pointer;
+	__u64 return_pointer;
	/* The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write. */
-	uint32_t return_size;
+	__u32 return_size;
	/* The query request id. */
-	uint32_t query;
+	__u32 query;

	union {
		struct {
-			uint32_t id;
-			uint32_t _pad;
+			__u32 id;
+			__u32 _pad;
		} mode_crtc;

		struct {
			/** AMDGPU_HW_IP_* */
-			uint32_t type;
+			__u32 type;
			/**
			 * Index of the IP if there are more IPs of the same
			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
			 */
-			uint32_t ip_instance;
+			__u32 ip_instance;
		} query_hw_ip;

		struct {
-			uint32_t dword_offset;
+			__u32 dword_offset;
			/** number of registers to read */
-			uint32_t count;
-			uint32_t instance;
+			__u32 count;
+			__u32 instance;
			/** For future use, no flags defined so far */
-			uint32_t flags;
+			__u32 flags;
		} read_mmr_reg;

		struct {
			/** AMDGPU_INFO_FW_* */
-			uint32_t fw_type;
+			__u32 fw_type;
			/**
			 * Index of the IP if there are more IPs of
			 * the same type.
			 */
-			uint32_t ip_instance;
+			__u32 ip_instance;
			/**
			 * Index of the engine. Whether this is used depends
			 * on the firmware type. (e.g. MEC, SDMA)
			 */
-			uint32_t index;
-			uint32_t _pad;
+			__u32 index;
+			__u32 _pad;
		} query_fw;
	};
 };

 struct drm_amdgpu_info_gds {
	/** GDS GFX partition size */
-	uint32_t gds_gfx_partition_size;
+	__u32 gds_gfx_partition_size;
	/** GDS compute partition size */
-	uint32_t compute_partition_size;
+	__u32 compute_partition_size;
	/** total GDS memory size */
-	uint32_t gds_total_size;
+	__u32 gds_total_size;
	/** GWS size per GFX partition */
-	uint32_t gws_per_gfx_partition;
+	__u32 gws_per_gfx_partition;
	/** GSW size per compute partition */
-	uint32_t gws_per_compute_partition;
+	__u32 gws_per_compute_partition;
	/** OA size per GFX partition */
-	uint32_t oa_per_gfx_partition;
+	__u32 oa_per_gfx_partition;
	/** OA size per compute partition */
-	uint32_t oa_per_compute_partition;
-	uint32_t _pad;
+	__u32 oa_per_compute_partition;
+	__u32 _pad;
 };

 struct drm_amdgpu_info_vram_gtt {
-	uint64_t vram_size;
-	uint64_t vram_cpu_accessible_size;
-	uint64_t gtt_size;
+	__u64 vram_size;
+	__u64 vram_cpu_accessible_size;
+	__u64 gtt_size;
 };

 struct drm_amdgpu_info_firmware {
-	uint32_t ver;
-	uint32_t feature;
+	__u32 ver;
+	__u32 feature;
 };

 #define AMDGPU_VRAM_TYPE_UNKNOWN 0
@@ -576,61 +576,61 @@ struct drm_amdgpu_info_firmware {

 struct drm_amdgpu_info_device {
	/** PCI Device ID */
-	uint32_t device_id;
+	__u32 device_id;
	/** Internal chip revision: A0, A1, etc.) */
-	uint32_t chip_rev;
-	uint32_t external_rev;
+	__u32 chip_rev;
+	__u32 external_rev;
	/** Revision id in PCI Config space */
-	uint32_t pci_rev;
-	uint32_t family;
-	uint32_t num_shader_engines;
-	uint32_t num_shader_arrays_per_engine;
+	__u32 pci_rev;
+	__u32 family;
+	__u32 num_shader_engines;
+	__u32 num_shader_arrays_per_engine;
	/* in KHz */
-	uint32_t gpu_counter_freq;
-	uint64_t max_engine_clock;
-	uint64_t max_memory_clock;
+	__u32 gpu_counter_freq;
+	__u64 max_engine_clock;
+	__u64 max_memory_clock;
	/* cu information */
-	uint32_t cu_active_number;
-	uint32_t cu_ao_mask;
-	uint32_t cu_bitmap[4][4];
+	__u32 cu_active_number;
+	__u32 cu_ao_mask;
+	__u32 cu_bitmap[4][4];
	/** Render backend pipe mask. One render backend is CB+DB. */
-	uint32_t enabled_rb_pipes_mask;
-	uint32_t num_rb_pipes;
-	uint32_t num_hw_gfx_contexts;
-	uint32_t _pad;
-	uint64_t ids_flags;
+	__u32 enabled_rb_pipes_mask;
+	__u32 num_rb_pipes;
+	__u32 num_hw_gfx_contexts;
+	__u32 _pad;
+	__u64 ids_flags;
	/** Starting virtual address for UMDs. */
-	uint64_t virtual_address_offset;
+	__u64 virtual_address_offset;
	/** The maximum virtual address */
-	uint64_t virtual_address_max;
+	__u64 virtual_address_max;
	/** Required alignment of virtual addresses. */
-	uint32_t virtual_address_alignment;
+	__u32 virtual_address_alignment;
	/** Page table entry - fragment size */
-	uint32_t pte_fragment_size;
-	uint32_t gart_page_size;
+	__u32 pte_fragment_size;
+	__u32 gart_page_size;
	/** constant engine ram size*/
-	uint32_t ce_ram_size;
+	__u32 ce_ram_size;
	/** video memory type info*/
-	uint32_t vram_type;
+	__u32 vram_type;
	/** video memory bit width*/
-	uint32_t vram_bit_width;
+	__u32 vram_bit_width;
	/* vce harvesting instance */
-	uint32_t vce_harvest_config;
+	__u32 vce_harvest_config;
 };

 struct drm_amdgpu_info_hw_ip {
	/** Version of h/w IP */
-	uint32_t  hw_ip_version_major;
-	uint32_t  hw_ip_version_minor;
+	__u32  hw_ip_version_major;
+	__u32  hw_ip_version_minor;
	/** Capabilities */
-	uint64_t  capabilities_flags;
+	__u64  capabilities_flags;
	/** command buffer address start alignment*/
-	uint32_t  ib_start_alignment;
+	__u32  ib_start_alignment;
	/** command buffer size alignment*/
-	uint32_t  ib_size_alignment;
+	__u32  ib_size_alignment;
	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
-	uint32_t  available_rings;
-	uint32_t  _pad;
+	__u32  available_rings;
+	__u32  _pad;
 };

 /*
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
index 8dec3fdc99c7..6de7f0196ca0 100644
--- a/include/uapi/drm/armada_drm.h
+++ b/include/uapi/drm/armada_drm.h
@@ -9,6 +9,8 @@
 #ifndef DRM_ARMADA_IOCTL_H
 #define DRM_ARMADA_IOCTL_H

+#include "drm.h"
+
 #define DRM_ARMADA_GEM_CREATE		0x00
 #define DRM_ARMADA_GEM_MMAP		0x02
 #define DRM_ARMADA_GEM_PWRITE		0x03
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3801584a0c53..b4e92eb12044 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -54,6 +54,7 @@ typedef int32_t __s32;
 typedef uint32_t __u32;
 typedef int64_t __s64;
 typedef uint64_t __u64;
+typedef size_t __kernel_size_t;
 typedef unsigned long drm_handle_t;

 #endif
@@ -129,11 +130,11 @@ struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
-	size_t name_len;	  /**< Length of name buffer */
+	__kernel_size_t name_len;	  /**< Length of name buffer */
	char __user *name;	  /**< Name of driver */
-	size_t date_len;	  /**< Length of date buffer */
+	__kernel_size_t date_len;	  /**< Length of date buffer */
	char __user *date;	  /**< User-space buffer to hold date */
-	size_t desc_len;	  /**< Length of desc buffer */
+	__kernel_size_t desc_len;	  /**< Length of desc buffer */
	char __user *desc;	  /**< User-space buffer to hold desc */
 };

@@ -143,7 +144,7 @@ struct drm_version {
  * \sa drmGetBusid() and drmSetBusId().
  */
 struct drm_unique {
-	size_t unique_len;	  /**< Length of unique */
+	__kernel_size_t unique_len;	  /**< Length of unique */
	char __user *unique;	  /**< Unique name for driver instantiation */
 };

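The drm.h hunks swap size_t for __kernel_size_t in ioctl structures because size_t's width follows the userland build rather than the kernel ABI. A stand-alone illustration of the general hazard, not the DRM code itself; compile it with -m32 and -m64 to watch the first struct change layout while the second stays put:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct with_size_t { size_t   len; char *buf; };	/* ABI follows the build */
struct fixed_width { uint64_t len; uint64_t buf_ptr; };	/* ABI is constant */

int main(void)
{
	printf("size_t version: %zu bytes\n", sizeof(struct with_size_t));
	printf("fixed version : %zu bytes\n", sizeof(struct fixed_width));
	return 0;
}

When a 32-bit process issues an ioctl against a 64-bit kernel, a size_t field makes the two sides disagree about where subsequent members live; pinning the field to the kernel's own size type (or a fixed-width type) keeps both layouts in sync.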
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0b69a7753558..998bd253faad 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -24,7 +24,7 @@
 #ifndef DRM_FOURCC_H
 #define DRM_FOURCC_H

-#include <linux/types.h>
+#include "drm.h"

 #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
				 ((__u32)(c) << 16) | ((__u32)(d) << 24))
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 6c11ca401de8..50adb46204c2 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -27,7 +27,7 @@
 #ifndef _DRM_MODE_H
 #define _DRM_MODE_H

-#include <linux/types.h>
+#include "drm.h"

 #define DRM_DISPLAY_INFO_LEN	32
 #define DRM_CONNECTOR_NAME_LEN	32
@@ -526,14 +526,14 @@ struct drm_mode_crtc_page_flip {

 /* create a dumb scanout buffer */
 struct drm_mode_create_dumb {
-	uint32_t height;
-	uint32_t width;
-	uint32_t bpp;
-	uint32_t flags;
+	__u32 height;
+	__u32 width;
+	__u32 bpp;
+	__u32 flags;
	/* handle, pitch, size will be returned */
-	uint32_t handle;
-	uint32_t pitch;
-	uint64_t size;
+	__u32 handle;
+	__u32 pitch;
+	__u64 size;
 };

 /* set up for mmap of a dumb scanout buffer */
@@ -550,7 +550,7 @@ struct drm_mode_map_dumb {
 };

 struct drm_mode_destroy_dumb {
-	uint32_t handle;
+	__u32 handle;
 };

 /* page-flip flags are valid, plus: */
diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h
index 413a5642d49f..1d1a858a203d 100644
--- a/include/uapi/drm/drm_sarea.h
+++ b/include/uapi/drm/drm_sarea.h
@@ -32,7 +32,7 @@
 #ifndef _DRM_SAREA_H_
 #define _DRM_SAREA_H_

-#include <drm/drm.h>
+#include "drm.h"

 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 5575ed1598bd..312c67d744ae 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -15,7 +15,7 @@
 #ifndef _UAPI_EXYNOS_DRM_H_
 #define _UAPI_EXYNOS_DRM_H_

-#include <drm/drm.h>
+#include "drm.h"

 /**
  * User-desired buffer creation information structure.
@@ -27,7 +27,7 @@
  *	- this handle will be set by gem module of kernel side.
  */
 struct drm_exynos_gem_create {
-	uint64_t size;
+	__u64 size;
	unsigned int flags;
	unsigned int handle;
 };
@@ -44,7 +44,7 @@ struct drm_exynos_gem_create {
 struct drm_exynos_gem_info {
	unsigned int handle;
	unsigned int flags;
-	uint64_t size;
+	__u64 size;
 };

 /**
@@ -58,7 +58,7 @@ struct drm_exynos_gem_info {
 struct drm_exynos_vidi_connection {
	unsigned int connection;
	unsigned int extensions;
-	uint64_t edid;
+	__u64 edid;
 };

 /* memory type definitions. */
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
index 34736efd5824..bdb028723ded 100644
--- a/include/uapi/drm/i810_drm.h
+++ b/include/uapi/drm/i810_drm.h
@@ -1,7 +1,7 @@
 #ifndef _I810_DRM_H_
 #define _I810_DRM_H_

-#include <drm/drm.h>
+#include "drm.h"

 /* WARNING: These defines must be the same as what the Xserver uses.
  * if you change them, you must change the defines in the Xserver.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 484a9fb20479..c937a3628190 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -27,7 +27,7 @@
 #ifndef _UAPI_I915_DRM_H_
 #define _UAPI_I915_DRM_H_

-#include <drm/drm.h>
+#include "drm.h"

 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
@@ -1079,6 +1079,12 @@ struct drm_i915_gem_context_destroy {
 };

 struct drm_i915_reg_read {
+	/*
+	 * Register offset.
+	 * For 64bit wide registers where the upper 32bits don't immediately
+	 * follow the lower 32bits, the offset of the lower 32bits must
+	 * be specified
+	 */
	__u64 offset;
	__u64 val; /* Return value */
 };
@@ -1125,8 +1131,9 @@ struct drm_i915_gem_context_param {
	__u32 ctx_id;
	__u32 size;
	__u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
	__u64 value;
 };

diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
index 2375bfd6e5e9..fca817009e13 100644
--- a/include/uapi/drm/mga_drm.h
+++ b/include/uapi/drm/mga_drm.h
@@ -35,7 +35,7 @@
 #ifndef __MGA_DRM_H__
 #define __MGA_DRM_H__

-#include <drm/drm.h>
+#include "drm.h"

 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the Xserver file (mga_sarea.h)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 75a232b9a970..81e6e0d1d360 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -18,8 +18,7 @@
 #ifndef __MSM_DRM_H__
 #define __MSM_DRM_H__

-#include <stddef.h>
-#include <drm/drm.h>
+#include "drm.h"

 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints:
@@ -122,7 +121,7 @@ struct drm_msm_gem_cpu_fini {
 struct drm_msm_gem_submit_reloc {
	__u32 submit_offset;  /* in, offset from submit_bo */
	__u32 or;             /* in, value OR'd with result */
	__s32 shift;          /* in, amount of left shift (can be negative) */
	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
 };
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index fd594cc73cc0..500d82aecbe4 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -27,6 +27,8 @@
27 27
28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000 28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
29 29
30#include <drm/drm.h>
31
30#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) 32#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
31#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 33#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
32#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) 34#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
@@ -41,34 +43,34 @@
41#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 43#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
42 44
43struct drm_nouveau_gem_info { 45struct drm_nouveau_gem_info {
44 uint32_t handle; 46 __u32 handle;
45 uint32_t domain; 47 __u32 domain;
46 uint64_t size; 48 __u64 size;
47 uint64_t offset; 49 __u64 offset;
48 uint64_t map_handle; 50 __u64 map_handle;
49 uint32_t tile_mode; 51 __u32 tile_mode;
50 uint32_t tile_flags; 52 __u32 tile_flags;
51}; 53};
52 54
53struct drm_nouveau_gem_new { 55struct drm_nouveau_gem_new {
54 struct drm_nouveau_gem_info info; 56 struct drm_nouveau_gem_info info;
55 uint32_t channel_hint; 57 __u32 channel_hint;
56 uint32_t align; 58 __u32 align;
57}; 59};
58 60
59#define NOUVEAU_GEM_MAX_BUFFERS 1024 61#define NOUVEAU_GEM_MAX_BUFFERS 1024
60struct drm_nouveau_gem_pushbuf_bo_presumed { 62struct drm_nouveau_gem_pushbuf_bo_presumed {
61 uint32_t valid; 63 __u32 valid;
62 uint32_t domain; 64 __u32 domain;
63 uint64_t offset; 65 __u64 offset;
64}; 66};
65 67
66struct drm_nouveau_gem_pushbuf_bo { 68struct drm_nouveau_gem_pushbuf_bo {
67 uint64_t user_priv; 69 __u64 user_priv;
68 uint32_t handle; 70 __u32 handle;
69 uint32_t read_domains; 71 __u32 read_domains;
70 uint32_t write_domains; 72 __u32 write_domains;
71 uint32_t valid_domains; 73 __u32 valid_domains;
72 struct drm_nouveau_gem_pushbuf_bo_presumed presumed; 74 struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
73}; 75};
74 76
@@ -77,46 +79,46 @@ struct drm_nouveau_gem_pushbuf_bo {
77#define NOUVEAU_GEM_RELOC_OR (1 << 2) 79#define NOUVEAU_GEM_RELOC_OR (1 << 2)
78#define NOUVEAU_GEM_MAX_RELOCS 1024 80#define NOUVEAU_GEM_MAX_RELOCS 1024
79struct drm_nouveau_gem_pushbuf_reloc { 81struct drm_nouveau_gem_pushbuf_reloc {
80 uint32_t reloc_bo_index; 82 __u32 reloc_bo_index;
81 uint32_t reloc_bo_offset; 83 __u32 reloc_bo_offset;
82 uint32_t bo_index; 84 __u32 bo_index;
83 uint32_t flags; 85 __u32 flags;
84 uint32_t data; 86 __u32 data;
85 uint32_t vor; 87 __u32 vor;
86 uint32_t tor; 88 __u32 tor;
87}; 89};
88 90
89#define NOUVEAU_GEM_MAX_PUSH 512 91#define NOUVEAU_GEM_MAX_PUSH 512
90struct drm_nouveau_gem_pushbuf_push { 92struct drm_nouveau_gem_pushbuf_push {
91 uint32_t bo_index; 93 __u32 bo_index;
92 uint32_t pad; 94 __u32 pad;
93 uint64_t offset; 95 __u64 offset;
94 uint64_t length; 96 __u64 length;
95}; 97};
96 98
97struct drm_nouveau_gem_pushbuf { 99struct drm_nouveau_gem_pushbuf {
98 uint32_t channel; 100 __u32 channel;
99 uint32_t nr_buffers; 101 __u32 nr_buffers;
100 uint64_t buffers; 102 __u64 buffers;
101 uint32_t nr_relocs; 103 __u32 nr_relocs;
102 uint32_t nr_push; 104 __u32 nr_push;
103 uint64_t relocs; 105 __u64 relocs;
104 uint64_t push; 106 __u64 push;
105 uint32_t suffix0; 107 __u32 suffix0;
106 uint32_t suffix1; 108 __u32 suffix1;
107 uint64_t vram_available; 109 __u64 vram_available;
108 uint64_t gart_available; 110 __u64 gart_available;
109}; 111};
110 112
111#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 113#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
112#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 114#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
113struct drm_nouveau_gem_cpu_prep { 115struct drm_nouveau_gem_cpu_prep {
114 uint32_t handle; 116 __u32 handle;
115 uint32_t flags; 117 __u32 flags;
116}; 118};
117 119
118struct drm_nouveau_gem_cpu_fini { 120struct drm_nouveau_gem_cpu_fini {
119 uint32_t handle; 121 __u32 handle;
120}; 122};
121 123
122#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */ 124#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
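
The nouveau hunk above is the pattern this whole series applies: each UAPI header now includes "drm.h" (or <drm/drm.h>) so it is self-contained once installed, and every field moves from the C99 uint32_t/uint64_t names to the kernel's __u32/__u64 types, whose layout is identical for 32-bit and 64-bit userspace. The qxl and virtgpu comments below state the companion rule: never put raw pointers in these structs, carry them as __u64. A minimal sketch of that convention, with hypothetical names (example_ioctl_arg and example_fill are not part of any header):

    #include <stdint.h>
    #include <string.h>

    /* Laid out like the UAPI structs in this series: fixed-width fields only,
     * pointers widened to 64 bits, explicit padding for a stable layout. */
    struct example_ioctl_arg {
            uint64_t data_ptr;      /* user pointer carried as a plain 64-bit value */
            uint32_t size;
            uint32_t pad;           /* keeps the struct a multiple of 8 bytes */
    };

    static void example_fill(struct example_ioctl_arg *arg, void *buf, uint32_t len)
    {
            memset(arg, 0, sizeof(*arg));
            /* the double cast is the portable way to widen a pointer */
            arg->data_ptr = (uint64_t)(uintptr_t)buf;
            arg->size = len;
    }

The kernel converts back with a cast in the other direction; newer kernels wrap that as u64_to_user_ptr().
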
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 1d0b1172664e..0750c01bb480 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -20,7 +20,7 @@
20#ifndef __OMAP_DRM_H__ 20#ifndef __OMAP_DRM_H__
21#define __OMAP_DRM_H__ 21#define __OMAP_DRM_H__
22 22
23#include <drm/drm.h> 23#include "drm.h"
24 24
25/* Please note that modifications to all structs defined here are 25/* Please note that modifications to all structs defined here are
26 * subject to backwards-compatibility constraints. 26 * subject to backwards-compatibility constraints.
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
index ebebd36c4117..4d1e32640463 100644
--- a/include/uapi/drm/qxl_drm.h
+++ b/include/uapi/drm/qxl_drm.h
@@ -24,13 +24,12 @@
24#ifndef QXL_DRM_H 24#ifndef QXL_DRM_H
25#define QXL_DRM_H 25#define QXL_DRM_H
26 26
27#include <stddef.h> 27#include "drm.h"
28#include "drm/drm.h"
29 28
30/* Please note that modifications to all structs defined here are 29/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 30 * subject to backwards-compatibility constraints.
32 * 31 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 32 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility. Keep fields aligned to their size 33 * compatibility. Keep fields aligned to their size
35 */ 34 */
36 35
@@ -48,14 +47,14 @@
48#define DRM_QXL_ALLOC_SURF 0x06 47#define DRM_QXL_ALLOC_SURF 0x06
49 48
50struct drm_qxl_alloc { 49struct drm_qxl_alloc {
51 uint32_t size; 50 __u32 size;
52 uint32_t handle; /* 0 is an invalid handle */ 51 __u32 handle; /* 0 is an invalid handle */
53}; 52};
54 53
55struct drm_qxl_map { 54struct drm_qxl_map {
56 uint64_t offset; /* use for mmap system call */ 55 __u64 offset; /* use for mmap system call */
57 uint32_t handle; 56 __u32 handle;
58 uint32_t pad; 57 __u32 pad;
59}; 58};
60 59
61/* 60/*
@@ -68,59 +67,59 @@ struct drm_qxl_map {
68#define QXL_RELOC_TYPE_SURF 2 67#define QXL_RELOC_TYPE_SURF 2
69 68
70struct drm_qxl_reloc { 69struct drm_qxl_reloc {
71 uint64_t src_offset; /* offset into src_handle or src buffer */ 70 __u64 src_offset; /* offset into src_handle or src buffer */
72 uint64_t dst_offset; /* offset in dest handle */ 71 __u64 dst_offset; /* offset in dest handle */
73 uint32_t src_handle; /* dest handle to compute address from */ 72 __u32 src_handle; /* dest handle to compute address from */
74 uint32_t dst_handle; /* 0 if to command buffer */ 73 __u32 dst_handle; /* 0 if to command buffer */
75 uint32_t reloc_type; 74 __u32 reloc_type;
76 uint32_t pad; 75 __u32 pad;
77}; 76};
78 77
79struct drm_qxl_command { 78struct drm_qxl_command {
80 uint64_t __user command; /* void* */ 79 __u64 __user command; /* void* */
81 uint64_t __user relocs; /* struct drm_qxl_reloc* */ 80 __u64 __user relocs; /* struct drm_qxl_reloc* */
82 uint32_t type; 81 __u32 type;
83 uint32_t command_size; 82 __u32 command_size;
84 uint32_t relocs_num; 83 __u32 relocs_num;
85 uint32_t pad; 84 __u32 pad;
86}; 85};
87 86
88/* XXX: call it drm_qxl_commands? */ 87/* XXX: call it drm_qxl_commands? */
89struct drm_qxl_execbuffer { 88struct drm_qxl_execbuffer {
90 uint32_t flags; /* for future use */ 89 __u32 flags; /* for future use */
91 uint32_t commands_num; 90 __u32 commands_num;
92 uint64_t __user commands; /* struct drm_qxl_command* */ 91 __u64 __user commands; /* struct drm_qxl_command* */
93}; 92};
94 93
95struct drm_qxl_update_area { 94struct drm_qxl_update_area {
96 uint32_t handle; 95 __u32 handle;
97 uint32_t top; 96 __u32 top;
98 uint32_t left; 97 __u32 left;
99 uint32_t bottom; 98 __u32 bottom;
100 uint32_t right; 99 __u32 right;
101 uint32_t pad; 100 __u32 pad;
102}; 101};
103 102
104#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ 103#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
105#define QXL_PARAM_MAX_RELOCS 2 104#define QXL_PARAM_MAX_RELOCS 2
106struct drm_qxl_getparam { 105struct drm_qxl_getparam {
107 uint64_t param; 106 __u64 param;
108 uint64_t value; 107 __u64 value;
109}; 108};
110 109
111/* these are one bit values */ 110/* these are one bit values */
112struct drm_qxl_clientcap { 111struct drm_qxl_clientcap {
113 uint32_t index; 112 __u32 index;
114 uint32_t pad; 113 __u32 pad;
115}; 114};
116 115
117struct drm_qxl_alloc_surf { 116struct drm_qxl_alloc_surf {
118 uint32_t format; 117 __u32 format;
119 uint32_t width; 118 __u32 width;
120 uint32_t height; 119 __u32 height;
121 int32_t stride; 120 __s32 stride;
122 uint32_t handle; 121 __u32 handle;
123 uint32_t pad; 122 __u32 pad;
124}; 123};
125 124
126#define DRM_IOCTL_QXL_ALLOC \ 125#define DRM_IOCTL_QXL_ALLOC \
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
index 76b0aa3e8210..7a44c6500a7e 100644
--- a/include/uapi/drm/r128_drm.h
+++ b/include/uapi/drm/r128_drm.h
@@ -33,7 +33,7 @@
33#ifndef __R128_DRM_H__ 33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__ 34#define __R128_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (r128_sarea.h) 39 * defines in the X server file (r128_sarea.h)
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 01aa2a8e3f8d..ccb9bcd82685 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -793,9 +793,9 @@ typedef struct drm_radeon_surface_free {
793#define RADEON_GEM_DOMAIN_VRAM 0x4 793#define RADEON_GEM_DOMAIN_VRAM 0x4
794 794
795struct drm_radeon_gem_info { 795struct drm_radeon_gem_info {
796 uint64_t gart_size; 796 __u64 gart_size;
797 uint64_t vram_size; 797 __u64 vram_size;
798 uint64_t vram_visible; 798 __u64 vram_visible;
799}; 799};
800 800
801#define RADEON_GEM_NO_BACKING_STORE (1 << 0) 801#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
@@ -807,11 +807,11 @@ struct drm_radeon_gem_info {
807#define RADEON_GEM_NO_CPU_ACCESS (1 << 4) 807#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
808 808
809struct drm_radeon_gem_create { 809struct drm_radeon_gem_create {
810 uint64_t size; 810 __u64 size;
811 uint64_t alignment; 811 __u64 alignment;
812 uint32_t handle; 812 __u32 handle;
813 uint32_t initial_domain; 813 __u32 initial_domain;
814 uint32_t flags; 814 __u32 flags;
815}; 815};
816 816
817/* 817/*
@@ -825,10 +825,10 @@ struct drm_radeon_gem_create {
825#define RADEON_GEM_USERPTR_REGISTER (1 << 3) 825#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
826 826
827struct drm_radeon_gem_userptr { 827struct drm_radeon_gem_userptr {
828 uint64_t addr; 828 __u64 addr;
829 uint64_t size; 829 __u64 size;
830 uint32_t flags; 830 __u32 flags;
831 uint32_t handle; 831 __u32 handle;
832}; 832};
833 833
834#define RADEON_TILING_MACRO 0x1 834#define RADEON_TILING_MACRO 0x1
@@ -850,72 +850,72 @@ struct drm_radeon_gem_userptr {
850#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf 850#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
851 851
852struct drm_radeon_gem_set_tiling { 852struct drm_radeon_gem_set_tiling {
853 uint32_t handle; 853 __u32 handle;
854 uint32_t tiling_flags; 854 __u32 tiling_flags;
855 uint32_t pitch; 855 __u32 pitch;
856}; 856};
857 857
858struct drm_radeon_gem_get_tiling { 858struct drm_radeon_gem_get_tiling {
859 uint32_t handle; 859 __u32 handle;
860 uint32_t tiling_flags; 860 __u32 tiling_flags;
861 uint32_t pitch; 861 __u32 pitch;
862}; 862};
863 863
864struct drm_radeon_gem_mmap { 864struct drm_radeon_gem_mmap {
865 uint32_t handle; 865 __u32 handle;
866 uint32_t pad; 866 __u32 pad;
867 uint64_t offset; 867 __u64 offset;
868 uint64_t size; 868 __u64 size;
869 uint64_t addr_ptr; 869 __u64 addr_ptr;
870}; 870};
871 871
872struct drm_radeon_gem_set_domain { 872struct drm_radeon_gem_set_domain {
873 uint32_t handle; 873 __u32 handle;
874 uint32_t read_domains; 874 __u32 read_domains;
875 uint32_t write_domain; 875 __u32 write_domain;
876}; 876};
877 877
878struct drm_radeon_gem_wait_idle { 878struct drm_radeon_gem_wait_idle {
879 uint32_t handle; 879 __u32 handle;
880 uint32_t pad; 880 __u32 pad;
881}; 881};
882 882
883struct drm_radeon_gem_busy { 883struct drm_radeon_gem_busy {
884 uint32_t handle; 884 __u32 handle;
885 uint32_t domain; 885 __u32 domain;
886}; 886};
887 887
888struct drm_radeon_gem_pread { 888struct drm_radeon_gem_pread {
889 /** Handle for the object being read. */ 889 /** Handle for the object being read. */
890 uint32_t handle; 890 __u32 handle;
891 uint32_t pad; 891 __u32 pad;
892 /** Offset into the object to read from */ 892 /** Offset into the object to read from */
893 uint64_t offset; 893 __u64 offset;
894 /** Length of data to read */ 894 /** Length of data to read */
895 uint64_t size; 895 __u64 size;
896 /** Pointer to write the data into. */ 896 /** Pointer to write the data into. */
897 /* void *, but pointers are not 32/64 compatible */ 897 /* void *, but pointers are not 32/64 compatible */
898 uint64_t data_ptr; 898 __u64 data_ptr;
899}; 899};
900 900
901struct drm_radeon_gem_pwrite { 901struct drm_radeon_gem_pwrite {
902 /** Handle for the object being written to. */ 902 /** Handle for the object being written to. */
903 uint32_t handle; 903 __u32 handle;
904 uint32_t pad; 904 __u32 pad;
905 /** Offset into the object to write to */ 905 /** Offset into the object to write to */
906 uint64_t offset; 906 __u64 offset;
907 /** Length of data to write */ 907 /** Length of data to write */
908 uint64_t size; 908 __u64 size;
909 /** Pointer to read the data from. */ 909 /** Pointer to read the data from. */
910 /* void *, but pointers are not 32/64 compatible */ 910 /* void *, but pointers are not 32/64 compatible */
911 uint64_t data_ptr; 911 __u64 data_ptr;
912}; 912};
913 913
914/* Sets or returns a value associated with a buffer. */ 914/* Sets or returns a value associated with a buffer. */
915struct drm_radeon_gem_op { 915struct drm_radeon_gem_op {
916 uint32_t handle; /* buffer */ 916 __u32 handle; /* buffer */
917 uint32_t op; /* RADEON_GEM_OP_* */ 917 __u32 op; /* RADEON_GEM_OP_* */
918 uint64_t value; /* input or return value */ 918 __u64 value; /* input or return value */
919}; 919};
920 920
921#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 921#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
@@ -935,11 +935,11 @@ struct drm_radeon_gem_op {
935#define RADEON_VM_PAGE_SNOOPED (1 << 4) 935#define RADEON_VM_PAGE_SNOOPED (1 << 4)
936 936
937struct drm_radeon_gem_va { 937struct drm_radeon_gem_va {
938 uint32_t handle; 938 __u32 handle;
939 uint32_t operation; 939 __u32 operation;
940 uint32_t vm_id; 940 __u32 vm_id;
941 uint32_t flags; 941 __u32 flags;
942 uint64_t offset; 942 __u64 offset;
943}; 943};
944 944
945#define RADEON_CHUNK_ID_RELOCS 0x01 945#define RADEON_CHUNK_ID_RELOCS 0x01
@@ -961,29 +961,29 @@ struct drm_radeon_gem_va {
961/* 0 = normal, + = higher priority, - = lower priority */ 961/* 0 = normal, + = higher priority, - = lower priority */
962 962
963struct drm_radeon_cs_chunk { 963struct drm_radeon_cs_chunk {
964 uint32_t chunk_id; 964 __u32 chunk_id;
965 uint32_t length_dw; 965 __u32 length_dw;
966 uint64_t chunk_data; 966 __u64 chunk_data;
967}; 967};
968 968
969/* drm_radeon_cs_reloc.flags */ 969/* drm_radeon_cs_reloc.flags */
970#define RADEON_RELOC_PRIO_MASK (0xf << 0) 970#define RADEON_RELOC_PRIO_MASK (0xf << 0)
971 971
972struct drm_radeon_cs_reloc { 972struct drm_radeon_cs_reloc {
973 uint32_t handle; 973 __u32 handle;
974 uint32_t read_domains; 974 __u32 read_domains;
975 uint32_t write_domain; 975 __u32 write_domain;
976 uint32_t flags; 976 __u32 flags;
977}; 977};
978 978
979struct drm_radeon_cs { 979struct drm_radeon_cs {
980 uint32_t num_chunks; 980 __u32 num_chunks;
981 uint32_t cs_id; 981 __u32 cs_id;
982 /* this points to uint64_t * which point to cs chunks */ 982 /* this points to __u64 * which point to cs chunks */
983 uint64_t chunks; 983 __u64 chunks;
984 /* updates to the limits after this CS ioctl */ 984 /* updates to the limits after this CS ioctl */
985 uint64_t gart_limit; 985 __u64 gart_limit;
986 uint64_t vram_limit; 986 __u64 vram_limit;
987}; 987};
988 988
989#define RADEON_INFO_DEVICE_ID 0x00 989#define RADEON_INFO_DEVICE_ID 0x00
@@ -1042,9 +1042,9 @@ struct drm_radeon_cs {
1042#define RADEON_INFO_GPU_RESET_COUNTER 0x26 1042#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1043 1043
1044struct drm_radeon_info { 1044struct drm_radeon_info {
1045 uint32_t request; 1045 __u32 request;
1046 uint32_t pad; 1046 __u32 pad;
1047 uint64_t value; 1047 __u64 value;
1048}; 1048};
1049 1049
1050/* Those correspond to the tile index to use, this is to explicitly state 1050/* Those correspond to the tile index to use, this is to explicitly state
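
The comment kept in struct drm_radeon_cs ("this points to __u64 * which point to cs chunks") describes a double indirection: chunks holds the user address of an array of __u64s, and each entry in that array holds the address of one struct drm_radeon_cs_chunk. A hypothetical libdrm-style construction, with the two UAPI structs mirrored locally so the snippet stands alone:

    #include <stdint.h>

    struct radeon_cs_chunk {        /* mirrors struct drm_radeon_cs_chunk */
            uint32_t chunk_id;
            uint32_t length_dw;
            uint64_t chunk_data;
    };

    struct radeon_cs {              /* mirrors struct drm_radeon_cs */
            uint32_t num_chunks;
            uint32_t cs_id;
            uint64_t chunks;        /* -> array of __u64 -> individual chunks */
            uint64_t gart_limit;
            uint64_t vram_limit;
    };

    static void radeon_cs_link(struct radeon_cs *cs,
                               struct radeon_cs_chunk *chunk,
                               uint64_t chunk_array[])
    {
            chunk_array[0] = (uint64_t)(uintptr_t)chunk;
            cs->chunks = (uint64_t)(uintptr_t)chunk_array;
            cs->num_chunks = 1;
    }

The extra hop lets chunks of different sizes be gathered without first copying them into one contiguous buffer.
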
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
index 9dc9dc1a7753..574147489c60 100644
--- a/include/uapi/drm/savage_drm.h
+++ b/include/uapi/drm/savage_drm.h
@@ -26,7 +26,7 @@
26#ifndef __SAVAGE_DRM_H__ 26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__ 27#define __SAVAGE_DRM_H__
28 28
29#include <drm/drm.h> 29#include "drm.h"
30 30
31#ifndef __SAVAGE_SAREA_DEFINES__ 31#ifndef __SAVAGE_SAREA_DEFINES__
32#define __SAVAGE_SAREA_DEFINES__ 32#define __SAVAGE_SAREA_DEFINES__
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 5391780c2b05..27d0b054aed0 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -23,7 +23,7 @@
23#ifndef _UAPI_TEGRA_DRM_H_ 23#ifndef _UAPI_TEGRA_DRM_H_
24#define _UAPI_TEGRA_DRM_H_ 24#define _UAPI_TEGRA_DRM_H_
25 25
26#include <drm/drm.h> 26#include "drm.h"
27 27
28#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) 28#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
29#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) 29#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
new file mode 100644
index 000000000000..eeb37e394f13
--- /dev/null
+++ b/include/uapi/drm/vc4_drm.h
@@ -0,0 +1,279 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef _UAPI_VC4_DRM_H_
25#define _UAPI_VC4_DRM_H_
26
27#include "drm.h"
28
29#define DRM_VC4_SUBMIT_CL 0x00
30#define DRM_VC4_WAIT_SEQNO 0x01
31#define DRM_VC4_WAIT_BO 0x02
32#define DRM_VC4_CREATE_BO 0x03
33#define DRM_VC4_MMAP_BO 0x04
34#define DRM_VC4_CREATE_SHADER_BO 0x05
35#define DRM_VC4_GET_HANG_STATE 0x06
36
37#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
38#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
39#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
40#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
41#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
42#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
43#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
44
45struct drm_vc4_submit_rcl_surface {
46 __u32 hindex; /* Handle index, or ~0 if not present. */
47 __u32 offset; /* Offset to start of buffer. */
48 /*
49 * Bits for either render config (color_write) or load/store packet.
50 * Bits should all be 0 for MSAA load/stores.
51 */
52 __u16 bits;
53
54#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
55 __u16 flags;
56};
57
58/**
59 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
60 * engine.
61 *
62 * Drivers typically use GPU BOs to store batchbuffers / command lists and
63 * their associated state. However, because the VC4 lacks an MMU, we have to
64 * do validation of memory accesses by the GPU commands. If we were to store
65 * our commands in BOs, we'd need to do uncached readback from them to do the
66 * validation process, which is too expensive. Instead, userspace accumulates
67 * commands and associated state in plain memory, then the kernel copies the
68 * data to its own address space, and then validates and stores it in a GPU
69 * BO.
70 */
71struct drm_vc4_submit_cl {
72 /* Pointer to the binner command list.
73 *
74 * This is the first set of commands executed, which runs the
75 * coordinate shader to determine where primitives land on the screen,
76 * then writes out the state updates and draw calls necessary per tile
77 * to the tile allocation BO.
78 */
79 __u64 bin_cl;
80
81 /* Pointer to the shader records.
82 *
83 * Shader records are the structures read by the hardware that contain
84 * pointers to uniforms, shaders, and vertex attributes. The
85 * reference to the shader record has enough information to determine
86 * how many pointers are necessary (fixed number for shaders/uniforms,
87 * and an attribute count), so those BO indices into bo_handles are
88 * just stored as __u32s before each shader record passed in.
89 */
90 __u64 shader_rec;
91
92 /* Pointer to uniform data and texture handles for the textures
93 * referenced by the shader.
94 *
95 * For each shader state record, there is a set of uniform data in the
96 * order referenced by the record (FS, VS, then CS). Each set of
97 * uniform data has a __u32 index into bo_handles per texture
98 * sample operation, in the order the QPU_W_TMUn_S writes appear in
99 * the program. Following the texture BO handle indices is the actual
100 * uniform data.
101 *
102 * The individual uniform state blocks don't have sizes passed in,
103 * because the kernel has to determine the sizes anyway during shader
104 * code validation.
105 */
106 __u64 uniforms;
107 __u64 bo_handles;
108
109 /* Size in bytes of the binner command list. */
110 __u32 bin_cl_size;
111 /* Size in bytes of the set of shader records. */
112 __u32 shader_rec_size;
113 /* Number of shader records.
114 *
115 * This could just be computed from the contents of shader_records and
116 * the address bits of references to them from the bin CL, but it
117 * keeps the kernel from having to resize some allocations it makes.
118 */
119 __u32 shader_rec_count;
120 /* Size in bytes of the uniform state. */
121 __u32 uniforms_size;
122
123 /* Number of BO handles passed in (size is that times 4). */
124 __u32 bo_handle_count;
125
126 /* RCL setup: */
127 __u16 width;
128 __u16 height;
129 __u8 min_x_tile;
130 __u8 min_y_tile;
131 __u8 max_x_tile;
132 __u8 max_y_tile;
133 struct drm_vc4_submit_rcl_surface color_read;
134 struct drm_vc4_submit_rcl_surface color_write;
135 struct drm_vc4_submit_rcl_surface zs_read;
136 struct drm_vc4_submit_rcl_surface zs_write;
137 struct drm_vc4_submit_rcl_surface msaa_color_write;
138 struct drm_vc4_submit_rcl_surface msaa_zs_write;
139 __u32 clear_color[2];
140 __u32 clear_z;
141 __u8 clear_s;
142
143 __u32 pad:24;
144
145#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
146 __u32 flags;
147
148 /* Returned value of the seqno of this render job (for the
149 * wait ioctl).
150 */
151 __u64 seqno;
152};
153
154/**
155 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
156 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
157 *
158 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
159 * block, just return the status."
160 */
161struct drm_vc4_wait_seqno {
162 __u64 seqno;
163 __u64 timeout_ns;
164};
165
166/**
167 * struct drm_vc4_wait_bo - ioctl argument for waiting for
168 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
169 *
170 * This is useful for cases where multiple processes might be
171 * rendering to a BO and you want to wait for all rendering to be
172 * completed.
173 */
174struct drm_vc4_wait_bo {
175 __u32 handle;
176 __u32 pad;
177 __u64 timeout_ns;
178};
179
180/**
181 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
182 *
183 * There are currently no values for the flags argument, but it may be
184 * used in a future extension.
185 */
186struct drm_vc4_create_bo {
187 __u32 size;
188 __u32 flags;
189 /** Returned GEM handle for the BO. */
190 __u32 handle;
191 __u32 pad;
192};
193
194/**
195 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
196 *
197 * This doesn't actually perform an mmap. Instead, it returns the
198 * offset you need to use in an mmap on the DRM device node. This
199 * means that tools like valgrind end up knowing about the mapped
200 * memory.
201 *
202 * There are currently no values for the flags argument, but it may be
203 * used in a future extension.
204 */
205struct drm_vc4_mmap_bo {
206 /** Handle for the object being mapped. */
207 __u32 handle;
208 __u32 flags;
209 /** offset into the drm node to use for subsequent mmap call. */
210 __u64 offset;
211};
212
213/**
214 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
215 * shader BOs.
216 *
217 * Since allowing a shader to be overwritten while it's also being
218 * executed from would allow privilege escalation, shaders must be
219 * created using this ioctl, and they can't be mmapped later.
220 */
221struct drm_vc4_create_shader_bo {
222 /* Size of the data argument. */
223 __u32 size;
224 /* Flags, currently must be 0. */
225 __u32 flags;
226
227 /* Pointer to the data. */
228 __u64 data;
229
230 /** Returned GEM handle for the BO. */
231 __u32 handle;
232 /* Pad, must be 0. */
233 __u32 pad;
234};
235
236struct drm_vc4_get_hang_state_bo {
237 __u32 handle;
238 __u32 paddr;
239 __u32 size;
240 __u32 pad;
241};
242
243/**
244 * struct drm_vc4_hang_state - ioctl argument for collecting state
245 * from a GPU hang for analysis.
246*/
247struct drm_vc4_get_hang_state {
248 /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
249 __u64 bo;
250 /**
251 * On input, the size of the bo array. Output is the number
252 * of bos to be returned.
253 */
254 __u32 bo_count;
255
256 __u32 start_bin, start_render;
257
258 __u32 ct0ca, ct0ea;
259 __u32 ct1ca, ct1ea;
260 __u32 ct0cs, ct1cs;
261 __u32 ct0ra0, ct1ra0;
262
263 __u32 bpca, bpcs;
264 __u32 bpoa, bpos;
265
266 __u32 vpmbase;
267
268 __u32 dbge;
269 __u32 fdbgo;
270 __u32 fdbgb;
271 __u32 fdbgr;
272 __u32 fdbgs;
273 __u32 errstat;
274
275 /* Pad that we may save more registers into in the future. */
276 __u32 pad[16];
277};
278
279#endif /* _UAPI_VC4_DRM_H_ */
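
The new vc4 header documents a complete submit/wait cycle: DRM_IOCTL_VC4_SUBMIT_CL writes the job's seqno back into the argument struct, and DRM_IOCTL_VC4_WAIT_SEQNO blocks on it, with timeout_ns == 0 meaning "don't block, just return the status". A hedged usage sketch; it assumes the header above ends up installed as <drm/vc4_drm.h> and trims error handling to the essentials:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/vc4_drm.h>        /* assumed install path of the header above */

    static int vc4_submit_and_wait(int drm_fd, struct drm_vc4_submit_cl *submit)
    {
            struct drm_vc4_wait_seqno wait;

            if (ioctl(drm_fd, DRM_IOCTL_VC4_SUBMIT_CL, submit) != 0)
                    return -errno;

            memset(&wait, 0, sizeof(wait));
            wait.seqno = submit->seqno;          /* written back by the kernel */
            wait.timeout_ns = 1000000000ull;     /* 1s; 0 would only poll */

            if (ioctl(drm_fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait) != 0)
                    return -errno;
            return 0;
    }

DRM_IOCTL_VC4_WAIT_BO is the cross-process variant: it waits for the last submitted job touching a given BO without needing to know its seqno.
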
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
index 45bc80c3714b..fa21ed185520 100644
--- a/include/uapi/drm/via_drm.h
+++ b/include/uapi/drm/via_drm.h
@@ -24,7 +24,7 @@
24#ifndef _VIA_DRM_H_ 24#ifndef _VIA_DRM_H_
25#define _VIA_DRM_H_ 25#define _VIA_DRM_H_
26 26
27#include <drm/drm.h> 27#include "drm.h"
28 28
29/* WARNING: These defines must be the same as what the Xserver uses. 29/* WARNING: These defines must be the same as what the Xserver uses.
30 * if you change them, you must change the defines in the Xserver. 30 * if you change them, you must change the defines in the Xserver.
@@ -33,9 +33,6 @@
33#ifndef _VIA_DEFINES_ 33#ifndef _VIA_DEFINES_
34#define _VIA_DEFINES_ 34#define _VIA_DEFINES_
35 35
36#ifndef __KERNEL__
37#include "via_drmclient.h"
38#endif
39 36
40#define VIA_NR_SAREA_CLIPRECTS 8 37#define VIA_NR_SAREA_CLIPRECTS 8
41#define VIA_NR_XVMC_PORTS 10 38#define VIA_NR_XVMC_PORTS 10
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index fc9e2d6e5e2f..c74f1f90cb37 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -24,13 +24,12 @@
24#ifndef VIRTGPU_DRM_H 24#ifndef VIRTGPU_DRM_H
25#define VIRTGPU_DRM_H 25#define VIRTGPU_DRM_H
26 26
27#include <stddef.h> 27#include "drm.h"
28#include "drm/drm.h"
29 28
30/* Please note that modifications to all structs defined here are 29/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 30 * subject to backwards-compatibility constraints.
32 * 31 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 32 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility. Keep fields aligned to their size 33 * compatibility. Keep fields aligned to their size
35 */ 34 */
36 35
@@ -45,88 +44,88 @@
45#define DRM_VIRTGPU_GET_CAPS 0x09 44#define DRM_VIRTGPU_GET_CAPS 0x09
46 45
47struct drm_virtgpu_map { 46struct drm_virtgpu_map {
48 uint64_t offset; /* use for mmap system call */ 47 __u64 offset; /* use for mmap system call */
49 uint32_t handle; 48 __u32 handle;
50 uint32_t pad; 49 __u32 pad;
51}; 50};
52 51
53struct drm_virtgpu_execbuffer { 52struct drm_virtgpu_execbuffer {
54 uint32_t flags; /* for future use */ 53 __u32 flags; /* for future use */
55 uint32_t size; 54 __u32 size;
56 uint64_t command; /* void* */ 55 __u64 command; /* void* */
57 uint64_t bo_handles; 56 __u64 bo_handles;
58 uint32_t num_bo_handles; 57 __u32 num_bo_handles;
59 uint32_t pad; 58 __u32 pad;
60}; 59};
61 60
62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 61#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
63 62
64struct drm_virtgpu_getparam { 63struct drm_virtgpu_getparam {
65 uint64_t param; 64 __u64 param;
66 uint64_t value; 65 __u64 value;
67}; 66};
68 67
69/* NO_BO flags? NO resource flag? */ 68/* NO_BO flags? NO resource flag? */
70/* resource flag for y_0_top */ 69/* resource flag for y_0_top */
71struct drm_virtgpu_resource_create { 70struct drm_virtgpu_resource_create {
72 uint32_t target; 71 __u32 target;
73 uint32_t format; 72 __u32 format;
74 uint32_t bind; 73 __u32 bind;
75 uint32_t width; 74 __u32 width;
76 uint32_t height; 75 __u32 height;
77 uint32_t depth; 76 __u32 depth;
78 uint32_t array_size; 77 __u32 array_size;
79 uint32_t last_level; 78 __u32 last_level;
80 uint32_t nr_samples; 79 __u32 nr_samples;
81 uint32_t flags; 80 __u32 flags;
82 uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ 81 __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
83 uint32_t res_handle; /* returned by kernel */ 82 __u32 res_handle; /* returned by kernel */
84 uint32_t size; /* validate transfer in the host */ 83 __u32 size; /* validate transfer in the host */
85 uint32_t stride; /* validate transfer in the host */ 84 __u32 stride; /* validate transfer in the host */
86}; 85};
87 86
88struct drm_virtgpu_resource_info { 87struct drm_virtgpu_resource_info {
89 uint32_t bo_handle; 88 __u32 bo_handle;
90 uint32_t res_handle; 89 __u32 res_handle;
91 uint32_t size; 90 __u32 size;
92 uint32_t stride; 91 __u32 stride;
93}; 92};
94 93
95struct drm_virtgpu_3d_box { 94struct drm_virtgpu_3d_box {
96 uint32_t x; 95 __u32 x;
97 uint32_t y; 96 __u32 y;
98 uint32_t z; 97 __u32 z;
99 uint32_t w; 98 __u32 w;
100 uint32_t h; 99 __u32 h;
101 uint32_t d; 100 __u32 d;
102}; 101};
103 102
104struct drm_virtgpu_3d_transfer_to_host { 103struct drm_virtgpu_3d_transfer_to_host {
105 uint32_t bo_handle; 104 __u32 bo_handle;
106 struct drm_virtgpu_3d_box box; 105 struct drm_virtgpu_3d_box box;
107 uint32_t level; 106 __u32 level;
108 uint32_t offset; 107 __u32 offset;
109}; 108};
110 109
111struct drm_virtgpu_3d_transfer_from_host { 110struct drm_virtgpu_3d_transfer_from_host {
112 uint32_t bo_handle; 111 __u32 bo_handle;
113 struct drm_virtgpu_3d_box box; 112 struct drm_virtgpu_3d_box box;
114 uint32_t level; 113 __u32 level;
115 uint32_t offset; 114 __u32 offset;
116}; 115};
117 116
118#define VIRTGPU_WAIT_NOWAIT 1 /* like it */ 117#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
119struct drm_virtgpu_3d_wait { 118struct drm_virtgpu_3d_wait {
120 uint32_t handle; /* 0 is an invalid handle */ 119 __u32 handle; /* 0 is an invalid handle */
121 uint32_t flags; 120 __u32 flags;
122}; 121};
123 122
124struct drm_virtgpu_get_caps { 123struct drm_virtgpu_get_caps {
125 uint32_t cap_set_id; 124 __u32 cap_set_id;
126 uint32_t cap_set_ver; 125 __u32 cap_set_ver;
127 uint64_t addr; 126 __u64 addr;
128 uint32_t size; 127 __u32 size;
129 uint32_t pad; 128 __u32 pad;
130}; 129};
131 130
132#define DRM_IOCTL_VIRTGPU_MAP \ 131#define DRM_IOCTL_VIRTGPU_MAP \
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 05b204954d16..5b68b4d10884 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -28,9 +28,7 @@
28#ifndef __VMWGFX_DRM_H__ 28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__ 29#define __VMWGFX_DRM_H__
30 30
31#ifndef __KERNEL__ 31#include "drm.h"
32#include <drm/drm.h>
33#endif
34 32
35#define DRM_VMW_MAX_SURFACE_FACES 6 33#define DRM_VMW_MAX_SURFACE_FACES 6
36#define DRM_VMW_MAX_MIP_LEVELS 24 34#define DRM_VMW_MAX_MIP_LEVELS 24
@@ -111,9 +109,9 @@ enum drm_vmw_handle_type {
111 */ 109 */
112 110
113struct drm_vmw_getparam_arg { 111struct drm_vmw_getparam_arg {
114 uint64_t value; 112 __u64 value;
115 uint32_t param; 113 __u32 param;
116 uint32_t pad64; 114 __u32 pad64;
117}; 115};
118 116
119/*************************************************************************/ 117/*************************************************************************/
@@ -134,8 +132,8 @@ struct drm_vmw_getparam_arg {
134 */ 132 */
135 133
136struct drm_vmw_context_arg { 134struct drm_vmw_context_arg {
137 int32_t cid; 135 __s32 cid;
138 uint32_t pad64; 136 __u32 pad64;
139}; 137};
140 138
141/*************************************************************************/ 139/*************************************************************************/
@@ -165,7 +163,7 @@ struct drm_vmw_context_arg {
165 * @mip_levels: Number of mip levels for each face. 163 * @mip_levels: Number of mip levels for each face.
166 * An unused face should have 0 encoded. 164 * An unused face should have 0 encoded.
167 * @size_addr: Address of a user-space array of sruct drm_vmw_size 165 * @size_addr: Address of a user-space array of sruct drm_vmw_size
168 * cast to an uint64_t for 32-64 bit compatibility. 166 * cast to an __u64 for 32-64 bit compatibility.
169 * The size of the array should equal the total number of mipmap levels. 167 * The size of the array should equal the total number of mipmap levels.
170 * @shareable: Boolean whether other clients (as identified by file descriptors) 168 * @shareable: Boolean whether other clients (as identified by file descriptors)
171 * may reference this surface. 169 * may reference this surface.
@@ -177,12 +175,12 @@ struct drm_vmw_context_arg {
177 */ 175 */
178 176
179struct drm_vmw_surface_create_req { 177struct drm_vmw_surface_create_req {
180 uint32_t flags; 178 __u32 flags;
181 uint32_t format; 179 __u32 format;
182 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 180 __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
183 uint64_t size_addr; 181 __u64 size_addr;
184 int32_t shareable; 182 __s32 shareable;
185 int32_t scanout; 183 __s32 scanout;
186}; 184};
187 185
188/** 186/**
@@ -197,7 +195,7 @@ struct drm_vmw_surface_create_req {
197 */ 195 */
198 196
199struct drm_vmw_surface_arg { 197struct drm_vmw_surface_arg {
200 int32_t sid; 198 __s32 sid;
201 enum drm_vmw_handle_type handle_type; 199 enum drm_vmw_handle_type handle_type;
202}; 200};
203 201
@@ -213,10 +211,10 @@ struct drm_vmw_surface_arg {
213 */ 211 */
214 212
215struct drm_vmw_size { 213struct drm_vmw_size {
216 uint32_t width; 214 __u32 width;
217 uint32_t height; 215 __u32 height;
218 uint32_t depth; 216 __u32 depth;
219 uint32_t pad64; 217 __u32 pad64;
220}; 218};
221 219
222/** 220/**
@@ -284,13 +282,13 @@ union drm_vmw_surface_reference_arg {
284/** 282/**
285 * struct drm_vmw_execbuf_arg 283 * struct drm_vmw_execbuf_arg
286 * 284 *
287 * @commands: User-space address of a command buffer cast to an uint64_t. 285 * @commands: User-space address of a command buffer cast to an __u64.
288 * @command-size: Size in bytes of the command buffer. 286 * @command-size: Size in bytes of the command buffer.
289 * @throttle-us: Sleep until software is less than @throttle_us 287 * @throttle-us: Sleep until software is less than @throttle_us
290 * microseconds ahead of hardware. The driver may round this value 288 * microseconds ahead of hardware. The driver may round this value
291 * to the nearest kernel tick. 289 * to the nearest kernel tick.
292 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 290 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
293 * uint64_t. 291 * __u64.
294 * @version: Allows expanding the execbuf ioctl parameters without breaking 292 * @version: Allows expanding the execbuf ioctl parameters without breaking
295 * backwards compatibility, since user-space will always tell the kernel 293 * backwards compatibility, since user-space will always tell the kernel
296 * which version it uses. 294 * which version it uses.
@@ -302,14 +300,14 @@ union drm_vmw_surface_reference_arg {
302#define DRM_VMW_EXECBUF_VERSION 2 300#define DRM_VMW_EXECBUF_VERSION 2
303 301
304struct drm_vmw_execbuf_arg { 302struct drm_vmw_execbuf_arg {
305 uint64_t commands; 303 __u64 commands;
306 uint32_t command_size; 304 __u32 command_size;
307 uint32_t throttle_us; 305 __u32 throttle_us;
308 uint64_t fence_rep; 306 __u64 fence_rep;
309 uint32_t version; 307 __u32 version;
310 uint32_t flags; 308 __u32 flags;
311 uint32_t context_handle; 309 __u32 context_handle;
312 uint32_t pad64; 310 __u32 pad64;
313}; 311};
314 312
315/** 313/**
@@ -338,12 +336,12 @@ struct drm_vmw_execbuf_arg {
338 */ 336 */
339 337
340struct drm_vmw_fence_rep { 338struct drm_vmw_fence_rep {
341 uint32_t handle; 339 __u32 handle;
342 uint32_t mask; 340 __u32 mask;
343 uint32_t seqno; 341 __u32 seqno;
344 uint32_t passed_seqno; 342 __u32 passed_seqno;
345 uint32_t pad64; 343 __u32 pad64;
346 int32_t error; 344 __s32 error;
347}; 345};
348 346
349/*************************************************************************/ 347/*************************************************************************/
@@ -373,8 +371,8 @@ struct drm_vmw_fence_rep {
373 */ 371 */
374 372
375struct drm_vmw_alloc_dmabuf_req { 373struct drm_vmw_alloc_dmabuf_req {
376 uint32_t size; 374 __u32 size;
377 uint32_t pad64; 375 __u32 pad64;
378}; 376};
379 377
380/** 378/**
@@ -391,11 +389,11 @@ struct drm_vmw_alloc_dmabuf_req {
391 */ 389 */
392 390
393struct drm_vmw_dmabuf_rep { 391struct drm_vmw_dmabuf_rep {
394 uint64_t map_handle; 392 __u64 map_handle;
395 uint32_t handle; 393 __u32 handle;
396 uint32_t cur_gmr_id; 394 __u32 cur_gmr_id;
397 uint32_t cur_gmr_offset; 395 __u32 cur_gmr_offset;
398 uint32_t pad64; 396 __u32 pad64;
399}; 397};
400 398
401/** 399/**
@@ -428,8 +426,8 @@ union drm_vmw_alloc_dmabuf_arg {
428 */ 426 */
429 427
430struct drm_vmw_unref_dmabuf_arg { 428struct drm_vmw_unref_dmabuf_arg {
431 uint32_t handle; 429 __u32 handle;
432 uint32_t pad64; 430 __u32 pad64;
433}; 431};
434 432
435/*************************************************************************/ 433/*************************************************************************/
@@ -452,10 +450,10 @@ struct drm_vmw_unref_dmabuf_arg {
452 */ 450 */
453 451
454struct drm_vmw_rect { 452struct drm_vmw_rect {
455 int32_t x; 453 __s32 x;
456 int32_t y; 454 __s32 y;
457 uint32_t w; 455 __u32 w;
458 uint32_t h; 456 __u32 h;
459}; 457};
460 458
461/** 459/**
@@ -477,21 +475,21 @@ struct drm_vmw_rect {
477 */ 475 */
478 476
479struct drm_vmw_control_stream_arg { 477struct drm_vmw_control_stream_arg {
480 uint32_t stream_id; 478 __u32 stream_id;
481 uint32_t enabled; 479 __u32 enabled;
482 480
483 uint32_t flags; 481 __u32 flags;
484 uint32_t color_key; 482 __u32 color_key;
485 483
486 uint32_t handle; 484 __u32 handle;
487 uint32_t offset; 485 __u32 offset;
488 int32_t format; 486 __s32 format;
489 uint32_t size; 487 __u32 size;
490 uint32_t width; 488 __u32 width;
491 uint32_t height; 489 __u32 height;
492 uint32_t pitch[3]; 490 __u32 pitch[3];
493 491
494 uint32_t pad64; 492 __u32 pad64;
495 struct drm_vmw_rect src; 493 struct drm_vmw_rect src;
496 struct drm_vmw_rect dst; 494 struct drm_vmw_rect dst;
497}; 495};
@@ -519,12 +517,12 @@ struct drm_vmw_control_stream_arg {
519 */ 517 */
520 518
521struct drm_vmw_cursor_bypass_arg { 519struct drm_vmw_cursor_bypass_arg {
522 uint32_t flags; 520 __u32 flags;
523 uint32_t crtc_id; 521 __u32 crtc_id;
524 int32_t xpos; 522 __s32 xpos;
525 int32_t ypos; 523 __s32 ypos;
526 int32_t xhot; 524 __s32 xhot;
527 int32_t yhot; 525 __s32 yhot;
528}; 526};
529 527
530/*************************************************************************/ 528/*************************************************************************/
@@ -542,8 +540,8 @@ struct drm_vmw_cursor_bypass_arg {
542 */ 540 */
543 541
544struct drm_vmw_stream_arg { 542struct drm_vmw_stream_arg {
545 uint32_t stream_id; 543 __u32 stream_id;
546 uint32_t pad64; 544 __u32 pad64;
547}; 545};
548 546
549/*************************************************************************/ 547/*************************************************************************/
@@ -565,7 +563,7 @@ struct drm_vmw_stream_arg {
565/** 563/**
566 * struct drm_vmw_get_3d_cap_arg 564 * struct drm_vmw_get_3d_cap_arg
567 * 565 *
568 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t 566 * @buffer: Pointer to a buffer for capability data, cast to an __u64
569 * @size: Max size to copy 567 * @size: Max size to copy
570 * 568 *
571 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL 569 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
@@ -573,9 +571,9 @@ struct drm_vmw_stream_arg {
573 */ 571 */
574 572
575struct drm_vmw_get_3d_cap_arg { 573struct drm_vmw_get_3d_cap_arg {
576 uint64_t buffer; 574 __u64 buffer;
577 uint32_t max_size; 575 __u32 max_size;
578 uint32_t pad64; 576 __u32 pad64;
579}; 577};
580 578
581/*************************************************************************/ 579/*************************************************************************/
@@ -624,14 +622,14 @@ struct drm_vmw_get_3d_cap_arg {
624 */ 622 */
625 623
626struct drm_vmw_fence_wait_arg { 624struct drm_vmw_fence_wait_arg {
627 uint32_t handle; 625 __u32 handle;
628 int32_t cookie_valid; 626 __s32 cookie_valid;
629 uint64_t kernel_cookie; 627 __u64 kernel_cookie;
630 uint64_t timeout_us; 628 __u64 timeout_us;
631 int32_t lazy; 629 __s32 lazy;
632 int32_t flags; 630 __s32 flags;
633 int32_t wait_options; 631 __s32 wait_options;
634 int32_t pad64; 632 __s32 pad64;
635}; 633};
636 634
637/*************************************************************************/ 635/*************************************************************************/
@@ -655,12 +653,12 @@ struct drm_vmw_fence_wait_arg {
655 */ 653 */
656 654
657struct drm_vmw_fence_signaled_arg { 655struct drm_vmw_fence_signaled_arg {
658 uint32_t handle; 656 __u32 handle;
659 uint32_t flags; 657 __u32 flags;
660 int32_t signaled; 658 __s32 signaled;
661 uint32_t passed_seqno; 659 __u32 passed_seqno;
662 uint32_t signaled_flags; 660 __u32 signaled_flags;
663 uint32_t pad64; 661 __u32 pad64;
664}; 662};
665 663
666/*************************************************************************/ 664/*************************************************************************/
@@ -681,8 +679,8 @@ struct drm_vmw_fence_signaled_arg {
681 */ 679 */
682 680
683struct drm_vmw_fence_arg { 681struct drm_vmw_fence_arg {
684 uint32_t handle; 682 __u32 handle;
685 uint32_t pad64; 683 __u32 pad64;
686}; 684};
687 685
688 686
@@ -703,9 +701,9 @@ struct drm_vmw_fence_arg {
703 701
704struct drm_vmw_event_fence { 702struct drm_vmw_event_fence {
705 struct drm_event base; 703 struct drm_event base;
706 uint64_t user_data; 704 __u64 user_data;
707 uint32_t tv_sec; 705 __u32 tv_sec;
708 uint32_t tv_usec; 706 __u32 tv_usec;
709}; 707};
710 708
711/* 709/*
@@ -717,17 +715,17 @@ struct drm_vmw_event_fence {
717/** 715/**
718 * struct drm_vmw_fence_event_arg 716 * struct drm_vmw_fence_event_arg
719 * 717 *
720 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if 718 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
721 * the fence is not supposed to be referenced by user-space. 719 * the fence is not supposed to be referenced by user-space.
722 * @user_info: Info to be delivered with the event. 720 * @user_info: Info to be delivered with the event.
723 * @handle: Attach the event to this fence only. 721 * @handle: Attach the event to this fence only.
724 * @flags: A set of flags as defined above. 722 * @flags: A set of flags as defined above.
725 */ 723 */
726struct drm_vmw_fence_event_arg { 724struct drm_vmw_fence_event_arg {
727 uint64_t fence_rep; 725 __u64 fence_rep;
728 uint64_t user_data; 726 __u64 user_data;
729 uint32_t handle; 727 __u32 handle;
730 uint32_t flags; 728 __u32 flags;
731}; 729};
732 730
733 731
@@ -747,7 +745,7 @@ struct drm_vmw_fence_event_arg {
747 * @sid: Surface id to present from. 745 * @sid: Surface id to present from.
748 * @dest_x: X placement coordinate for surface. 746 * @dest_x: X placement coordinate for surface.
749 * @dest_y: Y placement coordinate for surface. 747 * @dest_y: Y placement coordinate for surface.
750 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 748 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
751 * @num_clips: Number of cliprects given relative to the framebuffer origin, 749 * @num_clips: Number of cliprects given relative to the framebuffer origin,
752 * in the same coordinate space as the frame buffer. 750 * in the same coordinate space as the frame buffer.
753 * @pad64: Unused 64-bit padding. 751 * @pad64: Unused 64-bit padding.
@@ -756,13 +754,13 @@ struct drm_vmw_fence_event_arg {
756 */ 754 */
757 755
758struct drm_vmw_present_arg { 756struct drm_vmw_present_arg {
759 uint32_t fb_id; 757 __u32 fb_id;
760 uint32_t sid; 758 __u32 sid;
761 int32_t dest_x; 759 __s32 dest_x;
762 int32_t dest_y; 760 __s32 dest_y;
763 uint64_t clips_ptr; 761 __u64 clips_ptr;
764 uint32_t num_clips; 762 __u32 num_clips;
765 uint32_t pad64; 763 __u32 pad64;
766}; 764};
767 765
768 766
@@ -780,16 +778,16 @@ struct drm_vmw_present_arg {
780 * struct drm_vmw_present_arg 778 * struct drm_vmw_present_arg
781 * @fb_id: fb_id to present / read back from. 779 * @fb_id: fb_id to present / read back from.
782 * @num_clips: Number of cliprects. 780 * @num_clips: Number of cliprects.
783 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 781 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
784 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. 782 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
785 * If this member is NULL, then the ioctl should not return a fence. 783 * If this member is NULL, then the ioctl should not return a fence.
786 */ 784 */
787 785
788struct drm_vmw_present_readback_arg { 786struct drm_vmw_present_readback_arg {
789 uint32_t fb_id; 787 __u32 fb_id;
790 uint32_t num_clips; 788 __u32 num_clips;
791 uint64_t clips_ptr; 789 __u64 clips_ptr;
792 uint64_t fence_rep; 790 __u64 fence_rep;
793}; 791};
794 792
795/*************************************************************************/ 793/*************************************************************************/
@@ -805,14 +803,14 @@ struct drm_vmw_present_readback_arg {
805 * struct drm_vmw_update_layout_arg 803 * struct drm_vmw_update_layout_arg
806 * 804 *
807 * @num_outputs: number of active connectors 805 * @num_outputs: number of active connectors
808 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t 806 * @rects: pointer to array of drm_vmw_rect cast to an __u64
809 * 807 *
810 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 808 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
811 */ 809 */
812struct drm_vmw_update_layout_arg { 810struct drm_vmw_update_layout_arg {
813 uint32_t num_outputs; 811 __u32 num_outputs;
814 uint32_t pad64; 812 __u32 pad64;
815 uint64_t rects; 813 __u64 rects;
816}; 814};
817 815
818 816
@@ -849,10 +847,10 @@ enum drm_vmw_shader_type {
849 */ 847 */
850struct drm_vmw_shader_create_arg { 848struct drm_vmw_shader_create_arg {
851 enum drm_vmw_shader_type shader_type; 849 enum drm_vmw_shader_type shader_type;
852 uint32_t size; 850 __u32 size;
853 uint32_t buffer_handle; 851 __u32 buffer_handle;
854 uint32_t shader_handle; 852 __u32 shader_handle;
855 uint64_t offset; 853 __u64 offset;
856}; 854};
857 855
858/*************************************************************************/ 856/*************************************************************************/
@@ -871,8 +869,8 @@ struct drm_vmw_shader_create_arg {
871 * Input argument to the DRM_VMW_UNREF_SHADER ioctl. 869 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
872 */ 870 */
873struct drm_vmw_shader_arg { 871struct drm_vmw_shader_arg {
874 uint32_t handle; 872 __u32 handle;
875 uint32_t pad64; 873 __u32 pad64;
876}; 874};
877 875
878/*************************************************************************/ 876/*************************************************************************/
@@ -918,14 +916,14 @@ enum drm_vmw_surface_flags {
918 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. 916 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
919 */ 917 */
920struct drm_vmw_gb_surface_create_req { 918struct drm_vmw_gb_surface_create_req {
921 uint32_t svga3d_flags; 919 __u32 svga3d_flags;
922 uint32_t format; 920 __u32 format;
923 uint32_t mip_levels; 921 __u32 mip_levels;
924 enum drm_vmw_surface_flags drm_surface_flags; 922 enum drm_vmw_surface_flags drm_surface_flags;
925 uint32_t multisample_count; 923 __u32 multisample_count;
926 uint32_t autogen_filter; 924 __u32 autogen_filter;
927 uint32_t buffer_handle; 925 __u32 buffer_handle;
928 uint32_t array_size; 926 __u32 array_size;
929 struct drm_vmw_size base_size; 927 struct drm_vmw_size base_size;
930}; 928};
931 929
@@ -944,11 +942,11 @@ struct drm_vmw_gb_surface_create_req {
944 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. 942 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
945 */ 943 */
946struct drm_vmw_gb_surface_create_rep { 944struct drm_vmw_gb_surface_create_rep {
947 uint32_t handle; 945 __u32 handle;
948 uint32_t backup_size; 946 __u32 backup_size;
949 uint32_t buffer_handle; 947 __u32 buffer_handle;
950 uint32_t buffer_size; 948 __u32 buffer_size;
951 uint64_t buffer_map_handle; 949 __u64 buffer_map_handle;
952}; 950};
953 951
954/** 952/**
@@ -1061,8 +1059,8 @@ enum drm_vmw_synccpu_op {
1061struct drm_vmw_synccpu_arg { 1059struct drm_vmw_synccpu_arg {
1062 enum drm_vmw_synccpu_op op; 1060 enum drm_vmw_synccpu_op op;
1063 enum drm_vmw_synccpu_flags flags; 1061 enum drm_vmw_synccpu_flags flags;
1064 uint32_t handle; 1062 __u32 handle;
1065 uint32_t pad64; 1063 __u32 pad64;
1066}; 1064};
1067 1065
1068/*************************************************************************/ 1066/*************************************************************************/
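
The vmwgfx execbuf documentation above describes a write-back handshake rather than a returned seqno: userspace passes its command buffer and, optionally, the address of a struct drm_vmw_fence_rep (as a __u64) that the kernel fills in with a fence handle to wait on. A hypothetical filler using only fields from the structs above; the install path <drm/vmwgfx_drm.h> is an assumption:

    #include <stdint.h>
    #include <string.h>
    #include <drm/vmwgfx_drm.h>     /* assumed install path of the header above */

    static void vmw_prepare_execbuf(struct drm_vmw_execbuf_arg *arg,
                                    void *commands, uint32_t size,
                                    struct drm_vmw_fence_rep *rep)
    {
            memset(arg, 0, sizeof(*arg));
            arg->commands = (uint64_t)(uintptr_t)commands;
            arg->command_size = size;
            arg->throttle_us = 0;                      /* no software throttling */
            arg->fence_rep = (uint64_t)(uintptr_t)rep; /* kernel writes this back */
            arg->version = DRM_VMW_EXECBUF_VERSION;
    }

The handle written into rep can then be passed to the fence wait and fence signaled ioctls described above.
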
diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h
index 4e828cf487bc..f5251045181a 100644
--- a/include/uapi/linux/agpgart.h
+++ b/include/uapi/linux/agpgart.h
@@ -52,6 +52,7 @@
52 52
53#ifndef __KERNEL__ 53#ifndef __KERNEL__
54#include <linux/types.h> 54#include <linux/types.h>
55#include <stdlib.h>
55 56
56struct agp_version { 57struct agp_version {
57 __u16 major; 58 __u16 major;
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 654bae3f1a38..5e6296160361 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -33,17 +33,6 @@
33 33
34#define NFS_PIPE_DIRNAME "nfs" 34#define NFS_PIPE_DIRNAME "nfs"
35 35
36/* NFS ioctls */
37/* Let's follow btrfs lead on CLONE to avoid messing userspace */
38#define NFS_IOC_CLONE _IOW(0x94, 9, int)
39#define NFS_IOC_CLONE_RANGE _IOW(0x94, 13, int)
40
41struct nfs_ioctl_clone_range_args {
42 __s64 src_fd;
43 __u64 src_off, count;
44 __u64 dst_off;
45};
46
47/* 36/*
48 * NFS stats. The good thing with these values is that NFSv3 errors are 37 * NFS stats. The good thing with these values is that NFSv3 errors are
49 * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which 38 * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 7a63faa9065c..4b04ead26cd9 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -287,7 +287,7 @@ struct virtio_gpu_get_capset {
287/* VIRTIO_GPU_RESP_OK_CAPSET */ 287/* VIRTIO_GPU_RESP_OK_CAPSET */
288struct virtio_gpu_resp_capset { 288struct virtio_gpu_resp_capset {
289 struct virtio_gpu_ctrl_hdr hdr; 289 struct virtio_gpu_ctrl_hdr hdr;
290 uint8_t capset_data[]; 290 __u8 capset_data[];
291}; 291};
292 292
293#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) 293#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 85dedca3dcfb..eeba75395f7d 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -343,7 +343,6 @@ struct ipu_client_platformdata {
343 int di; 343 int di;
344 int dc; 344 int dc;
345 int dp; 345 int dp;
346 int dmfc;
347 int dma[2]; 346 int dma[2];
348}; 347};
349 348
diff --git a/kernel/async.c b/kernel/async.c
index 4c3773c0bf63..d2edd6efec56 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -326,3 +326,4 @@ bool current_is_async(void)
326 326
327 return worker && worker->current_func == async_run_entry_fn; 327 return worker && worker->current_func == async_run_entry_fn;
328} 328}
329EXPORT_SYMBOL_GPL(current_is_async);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3f4c99e06c6b..b0799bced518 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
28 attr->value_size == 0) 28 attr->value_size == 0)
29 return ERR_PTR(-EINVAL); 29 return ERR_PTR(-EINVAL);
30 30
31 if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
32 /* if value_size is bigger, the user space won't be able to
33 * access the elements.
34 */
35 return ERR_PTR(-E2BIG);
36
31 elem_size = round_up(attr->value_size, 8); 37 elem_size = round_up(attr->value_size, 8);
32 38
33 /* check round_up into zero and u32 overflow */ 39 /* check round_up into zero and u32 overflow */
34 if (elem_size == 0 || 40 if (elem_size == 0 ||
35 attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size) 41 attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
36 return ERR_PTR(-ENOMEM); 42 return ERR_PTR(-ENOMEM);
37 43
38 array_size = sizeof(*array) + attr->max_entries * elem_size; 44 array_size = sizeof(*array) + attr->max_entries * elem_size;
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
105 /* all elements already exist */ 111 /* all elements already exist */
106 return -EEXIST; 112 return -EEXIST;
107 113
108 memcpy(array->value + array->elem_size * index, value, array->elem_size); 114 memcpy(array->value + array->elem_size * index, value, map->value_size);
109 return 0; 115 return 0;
110} 116}
111 117
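
The two checks added to array_map_alloc guard different failure modes. The first caps value_size so a single element stays small enough to kmalloc and to copy through the syscall path; the second bounds max_entries so that the header plus max_entries * elem_size, with page-rounding headroom, stays below U32_MAX. A simplified, userspace-testable model of the arithmetic (the KMALLOC_SHIFT_MAX and PAGE_SIZE values here are illustrative assumptions; both are config-dependent in the kernel):

    #include <stdbool.h>
    #include <stdint.h>

    #define KMALLOC_SHIFT_MAX 25    /* illustrative; config-dependent in-kernel */
    #define PAGE_SIZE 4096u

    static bool array_alloc_ok(uint32_t value_size, uint32_t max_entries,
                               uint32_t header_size)
    {
            if (value_size >= (1u << (KMALLOC_SHIFT_MAX - 1)))
                    return false;   /* element too big to kmalloc or copy out */

            uint64_t elem_size = ((uint64_t)value_size + 7) & ~7ull; /* round_up(v, 8) */
            if (elem_size == 0)
                    return false;   /* mirrors the kernel's wrap check */

            /* header + entries * elem_size must leave room for page rounding */
            return max_entries <= (UINT32_MAX - PAGE_SIZE - header_size) / elem_size;
    }

The memcpy change in array_map_update_elem looks related: copying map->value_size rather than the rounded-up elem_size presumably avoids reading past the end of a source buffer that is only value_size bytes long.
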
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 19909b22b4f8..34777b3746fa 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
64 */ 64 */
65 goto free_htab; 65 goto free_htab;
66 66
67 err = -ENOMEM; 67 if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
68 MAX_BPF_STACK - sizeof(struct htab_elem))
69 /* if value_size is bigger, the user space won't be able to
70 * access the elements via bpf syscall. This check also makes
71 * sure that the elem_size doesn't overflow and it's
72 * kmalloc-able later in htab_map_update_elem()
73 */
74 goto free_htab;
75
76 htab->elem_size = sizeof(struct htab_elem) +
77 round_up(htab->map.key_size, 8) +
78 htab->map.value_size;
79
68 /* prevent zero size kmalloc and check for u32 overflow */ 80 /* prevent zero size kmalloc and check for u32 overflow */
69 if (htab->n_buckets == 0 || 81 if (htab->n_buckets == 0 ||
70 htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) 82 htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
71 goto free_htab; 83 goto free_htab;
72 84
85 if ((u64) htab->n_buckets * sizeof(struct hlist_head) +
86 (u64) htab->elem_size * htab->map.max_entries >=
87 U32_MAX - PAGE_SIZE)
88 /* make sure page count doesn't overflow */
89 goto free_htab;
90
91 htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
92 htab->elem_size * htab->map.max_entries,
93 PAGE_SIZE) >> PAGE_SHIFT;
94
95 err = -ENOMEM;
73 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), 96 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
74 GFP_USER | __GFP_NOWARN); 97 GFP_USER | __GFP_NOWARN);
75 98
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
85 raw_spin_lock_init(&htab->lock); 108 raw_spin_lock_init(&htab->lock);
86 htab->count = 0; 109 htab->count = 0;
87 110
88 htab->elem_size = sizeof(struct htab_elem) +
89 round_up(htab->map.key_size, 8) +
90 htab->map.value_size;
91
92 htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
93 htab->elem_size * htab->map.max_entries,
94 PAGE_SIZE) >> PAGE_SHIFT;
95 return &htab->map; 111 return &htab->map;
96 112
97free_htab: 113free_htab:
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
222 WARN_ON_ONCE(!rcu_read_lock_held()); 238 WARN_ON_ONCE(!rcu_read_lock_held());
223 239
224 /* allocate new element outside of lock */ 240 /* allocate new element outside of lock */
225 l_new = kmalloc(htab->elem_size, GFP_ATOMIC); 241 l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
226 if (!l_new) 242 if (!l_new)
227 return -ENOMEM; 243 return -ENOMEM;
228 244
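
The hashtab version of the same hardening must also account for a per-element header and the bucket array, which is why the elem_size computation moves up in front of the checks. The layout it sizes, modelled standalone (the fields of htab_elem_model are placeholders for the kernel's struct htab_elem; only the formula matters):

    #include <stdint.h>
    #include <stdio.h>

    struct htab_elem_model {                  /* placeholder for struct htab_elem */
            void *hlist_next, **hlist_pprev;  /* hash-list linkage */
            uint32_t hash;
            char key_and_value[];   /* key, rounded up to 8 bytes, then the value */
    };

    static uint64_t elem_size(uint32_t key_size, uint32_t value_size)
    {
            /* sizeof(struct htab_elem) + round_up(key_size, 8) + value_size */
            return sizeof(struct htab_elem_model)
                   + (((uint64_t)key_size + 7) & ~7ull)
                   + value_size;
    }

    int main(void)
    {
            /* a 4-byte key is padded to 8 before the 8-byte value starts */
            printf("elem_size(4, 8) = %llu bytes\n",
                   (unsigned long long)elem_size(4, 8));
            return 0;
    }

With elem_size known early, the added n_buckets * sizeof(struct hlist_head) + elem_size * max_entries < U32_MAX - PAGE_SIZE check bounds the total in 64-bit arithmetic before map.pages is derived from it.
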
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index be6d726e31c9..5a8a797d50b7 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
34 atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); 34 atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
35 break; 35 break;
36 case BPF_TYPE_MAP: 36 case BPF_TYPE_MAP:
37 atomic_inc(&((struct bpf_map *)raw)->refcnt); 37 bpf_map_inc(raw, true);
38 break; 38 break;
39 default: 39 default:
40 WARN_ON_ONCE(1); 40 WARN_ON_ONCE(1);
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type)
51 bpf_prog_put(raw); 51 bpf_prog_put(raw);
52 break; 52 break;
53 case BPF_TYPE_MAP: 53 case BPF_TYPE_MAP:
54 bpf_map_put(raw); 54 bpf_map_put_with_uref(raw);
55 break; 55 break;
56 default: 56 default:
57 WARN_ON_ONCE(1); 57 WARN_ON_ONCE(1);
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
64 void *raw; 64 void *raw;
65 65
66 *type = BPF_TYPE_MAP; 66 *type = BPF_TYPE_MAP;
67 raw = bpf_map_get(ufd); 67 raw = bpf_map_get_with_uref(ufd);
68 if (IS_ERR(raw)) { 68 if (IS_ERR(raw)) {
69 *type = BPF_TYPE_PROG; 69 *type = BPF_TYPE_PROG;
70 raw = bpf_prog_get(ufd); 70 raw = bpf_prog_get(ufd);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0d3313d02a7e..3b39550d8485 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work)
82 map->ops->map_free(map); 82 map->ops->map_free(map);
83} 83}
84 84
85static void bpf_map_put_uref(struct bpf_map *map)
86{
87 if (atomic_dec_and_test(&map->usercnt)) {
88 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
89 bpf_fd_array_map_clear(map);
90 }
91}
92
85/* decrement map refcnt and schedule it for freeing via workqueue 93/* decrement map refcnt and schedule it for freeing via workqueue
86 * (underlying map implementation ops->map_free() might sleep) 94 * (underlying map implementation ops->map_free() might sleep)
87 */ 95 */
@@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map)
93 } 101 }
94} 102}
95 103
96static int bpf_map_release(struct inode *inode, struct file *filp) 104void bpf_map_put_with_uref(struct bpf_map *map)
97{ 105{
98 struct bpf_map *map = filp->private_data; 106 bpf_map_put_uref(map);
99
100 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
101 /* prog_array stores refcnt-ed bpf_prog pointers
102 * release them all when user space closes prog_array_fd
103 */
104 bpf_fd_array_map_clear(map);
105
106 bpf_map_put(map); 107 bpf_map_put(map);
108}
109
110static int bpf_map_release(struct inode *inode, struct file *filp)
111{
112 bpf_map_put_with_uref(filp->private_data);
107 return 0; 113 return 0;
108} 114}
109 115
@@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr)
142 return PTR_ERR(map); 148 return PTR_ERR(map);
143 149
144 atomic_set(&map->refcnt, 1); 150 atomic_set(&map->refcnt, 1);
151 atomic_set(&map->usercnt, 1);
145 152
146 err = bpf_map_charge_memlock(map); 153 err = bpf_map_charge_memlock(map);
147 if (err) 154 if (err)
@@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f)
174 return f.file->private_data; 181 return f.file->private_data;
175} 182}
176 183
177struct bpf_map *bpf_map_get(u32 ufd) 184void bpf_map_inc(struct bpf_map *map, bool uref)
185{
186 atomic_inc(&map->refcnt);
187 if (uref)
188 atomic_inc(&map->usercnt);
189}
190
191struct bpf_map *bpf_map_get_with_uref(u32 ufd)
178{ 192{
179 struct fd f = fdget(ufd); 193 struct fd f = fdget(ufd);
180 struct bpf_map *map; 194 struct bpf_map *map;
@@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
183 if (IS_ERR(map)) 197 if (IS_ERR(map))
184 return map; 198 return map;
185 199
186 atomic_inc(&map->refcnt); 200 bpf_map_inc(map, true);
187 fdput(f); 201 fdput(f);
188 202
189 return map; 203 return map;
@@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr)
226 goto free_key; 240 goto free_key;
227 241
228 err = -ENOMEM; 242 err = -ENOMEM;
229 value = kmalloc(map->value_size, GFP_USER); 243 value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
230 if (!value) 244 if (!value)
231 goto free_key; 245 goto free_key;
232 246
@@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr)
285 goto free_key; 299 goto free_key;
286 300
287 err = -ENOMEM; 301 err = -ENOMEM;
288 value = kmalloc(map->value_size, GFP_USER); 302 value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
289 if (!value) 303 if (!value)
290 goto free_key; 304 goto free_key;
291 305
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c6073056badf..a7945d10b378 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2021 * will be used by the valid program until it's unloaded 2021 * will be used by the valid program until it's unloaded
2022 * and all maps are released in free_bpf_prog_info() 2022 * and all maps are released in free_bpf_prog_info()
2023 */ 2023 */
2024 atomic_inc(&map->refcnt); 2024 bpf_map_inc(map, false);
2025
2026 fdput(f); 2025 fdput(f);
2027next_insn: 2026next_insn:
2028 insn++; 2027 insn++;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e5344112419..db545cbcdb89 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
294 294
295 for (reloc = obj->relocs; reloc->name; reloc++) { 295 for (reloc = obj->relocs; reloc->name; reloc++) {
296 if (!klp_is_module(obj)) { 296 if (!klp_is_module(obj)) {
297
298#if defined(CONFIG_RANDOMIZE_BASE)
299 /* If KASLR has been enabled, adjust old value accordingly */
300 if (kaslr_enabled())
301 reloc->val += kaslr_offset();
302#endif
297 ret = klp_verify_vmlinux_symbol(reloc->name, 303 ret = klp_verify_vmlinux_symbol(reloc->name,
298 reloc->val); 304 reloc->val);
299 if (ret) 305 if (ret)
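The livepatch hunk adds a KASLR adjustment: symbol addresses recorded against the on-disk vmlinux must be shifted by the per-boot offset before they can match the running kernel. A small sketch of the rebasing step (the struct and values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

struct reloc_rec {
        const char *name;
        uint64_t val;   /* address computed against the unrandomized image */
};

/* with KASLR the live kernel is shifted by a per-boot offset, so the
 * recorded value is rebased before any symbol comparison */
static void rebase_reloc(struct reloc_rec *r, uint64_t kaslr_offset)
{
        r->val += kaslr_offset;
}

int main(void)
{
        struct reloc_rec r = { "example_symbol", 0xffffffff81000000ULL };

        rebase_reloc(&r, 0x1c000000ULL);        /* example offset */
        printf("%s -> %#llx\n", r.name, (unsigned long long)r.val);
        return 0;
}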
diff --git a/kernel/panic.c b/kernel/panic.c
index 4579dbb7ed87..4b150bc0c6c1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
152 * We may have ended up stopping the CPU holding the lock (in 152 * We may have ended up stopping the CPU holding the lock (in
153 * smp_send_stop()) while still having some valuable data in the console 153 * smp_send_stop()) while still having some valuable data in the console
154 * buffer. Try to acquire the lock then release it regardless of the 154 * buffer. Try to acquire the lock then release it regardless of the
155 * result. The release will also print the buffers out. 155 * result. The release will also print the buffers out. Lock debugging
156 * should be disabled to avoid reporting bad unlock balance when
157 * panic() is not being called from an OOPS.
156 */ 158 */
159 debug_locks_off();
157 console_trylock(); 160 console_trylock();
158 console_unlock(); 161 console_unlock();
159 162
diff --git a/kernel/pid.c b/kernel/pid.c
index ca368793808e..78b3d9f80d44 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
467 rcu_read_lock(); 467 rcu_read_lock();
468 if (type != PIDTYPE_PID) 468 if (type != PIDTYPE_PID)
469 task = task->group_leader; 469 task = task->group_leader;
470 pid = get_pid(task->pids[type].pid); 470 pid = get_pid(rcu_dereference(task->pids[type].pid));
471 rcu_read_unlock(); 471 rcu_read_unlock();
472 return pid; 472 return pid;
473} 473}
@@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
528 if (likely(pid_alive(task))) { 528 if (likely(pid_alive(task))) {
529 if (type != PIDTYPE_PID) 529 if (type != PIDTYPE_PID)
530 task = task->group_leader; 530 task = task->group_leader;
531 nr = pid_nr_ns(task->pids[type].pid, ns); 531 nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
532 } 532 }
533 rcu_read_unlock(); 533 rcu_read_unlock();
534 534
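Both pid.c hunks wrap the load of task->pids[type].pid in rcu_dereference(), because the pointer may be published or cleared concurrently with the reader. The closest standard-C analogue pairs a release store at publication with an acquire load at the reader (the kernel gets away with the cheaper address-dependency ordering); stand-in types only:

#include <stdatomic.h>
#include <stdio.h>

struct pid_info { int nr; };

static _Atomic(struct pid_info *) shared_pid;

static void publish(struct pid_info *p, int nr)
{
        p->nr = nr;     /* initialize before publication */
        /* rcu_assign_pointer() analogue */
        atomic_store_explicit(&shared_pid, p, memory_order_release);
}

static int read_nr(void)
{
        /* rcu_dereference() analogue */
        struct pid_info *p =
                atomic_load_explicit(&shared_pid, memory_order_acquire);

        return p ? p->nr : -1;
}

int main(void)
{
        static struct pid_info pi;

        publish(&pi, 42);
        printf("observed pid: %d\n", read_nr());
        return 0;
}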
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac9319e..7063c6a07440 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1947 1947
1948#ifdef CONFIG_SMP 1948#ifdef CONFIG_SMP
1949 /* 1949 /*
1950 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
1951 * possible to, falsely, observe p->on_cpu == 0.
1952 *
1953 * One must be running (->on_cpu == 1) in order to remove oneself
1954 * from the runqueue.
1955 *
1956 * [S] ->on_cpu = 1; [L] ->on_rq
1957 * UNLOCK rq->lock
1958 * RMB
1959 * LOCK rq->lock
1960 * [S] ->on_rq = 0; [L] ->on_cpu
1961 *
1962 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
1963 * from the consecutive calls to schedule(); the first switching to our
1964 * task, the second putting it to sleep.
1965 */
1966 smp_rmb();
1967
1968 /*
1950 * If the owning (remote) cpu is still in the middle of schedule() with 1969 * If the owning (remote) cpu is still in the middle of schedule() with
1951 * this task as prev, wait until it's done referencing the task. 1970 * this task as prev, wait until it's done referencing the task.
1952 */ 1971 */
1953 while (p->on_cpu) 1972 while (p->on_cpu)
1954 cpu_relax(); 1973 cpu_relax();
1955 /* 1974 /*
1956 * Pairs with the smp_wmb() in finish_lock_switch(). 1975 * Combined with the control dependency above, we have an effective
1976 * smp_load_acquire() without the need for full barriers.
1977 *
1978 * Pairs with the smp_store_release() in finish_lock_switch().
1979 *
1980 * This ensures that tasks getting woken will be fully ordered against
1981 * their previous state and preserve Program Order.
1957 */ 1982 */
1958 smp_rmb(); 1983 smp_rmb();
1959 1984
@@ -2039,7 +2064,6 @@ out:
2039 */ 2064 */
2040int wake_up_process(struct task_struct *p) 2065int wake_up_process(struct task_struct *p)
2041{ 2066{
2042 WARN_ON(task_is_stopped_or_traced(p));
2043 return try_to_wake_up(p, TASK_NORMAL, 0); 2067 return try_to_wake_up(p, TASK_NORMAL, 0);
2044} 2068}
2045EXPORT_SYMBOL(wake_up_process); 2069EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
5847{ 5871{
5848 memset(rd, 0, sizeof(*rd)); 5872 memset(rd, 0, sizeof(*rd));
5849 5873
5850 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5874 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5851 goto out; 5875 goto out;
5852 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5876 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5853 goto free_span; 5877 goto free_span;
5854 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5878 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5855 goto free_online; 5879 goto free_online;
5856 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5880 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5857 goto free_dlo_mask; 5881 goto free_dlo_mask;
5858 5882
5859 init_dl_bw(&rd->dl_bw); 5883 init_dl_bw(&rd->dl_bw);
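The new try_to_wake_up() comment documents a barrier pairing: finish_lock_switch() publishes ->on_cpu = 0 with smp_store_release(), and the waker orders its ->on_rq and ->on_cpu loads with smp_rmb(). A rough C11 model of the two sides, illustrative only (the kernel additionally leans on the rq lock and a control dependency):

#include <stdatomic.h>
#include <stdio.h>

struct task_model { atomic_int on_rq, on_cpu; };

static void put_task_to_sleep(struct task_model *t)
{
        atomic_store_explicit(&t->on_rq, 0, memory_order_relaxed);
        /* finish_lock_switch(): smp_store_release(&prev->on_cpu, 0) */
        atomic_store_explicit(&t->on_cpu, 0, memory_order_release);
}

static void waker(struct task_model *t)
{
        int on_rq = atomic_load_explicit(&t->on_rq, memory_order_relaxed);

        /* the added smp_rmb(): on_rq must be read before on_cpu */
        atomic_thread_fence(memory_order_acquire);

        while (atomic_load_explicit(&t->on_cpu, memory_order_acquire))
                ;       /* cpu_relax() until schedule() is done with it */

        printf("woke task, saw on_rq=%d\n", on_rq);
}

int main(void)
{
        struct task_model t = { 1, 1 };

        put_task_to_sleep(&t);
        waker(&t);
        return 0;
}

Without the added rmb, the waker could falsely observe ->on_cpu == 0 from before the task last ran, exactly the hazard the new comment spells out.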
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 26a54461bf59..05de80b48586 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
788 unsigned int seq; 788 unsigned int seq;
789 cputime_t gtime; 789 cputime_t gtime;
790 790
791 if (!context_tracking_is_enabled())
792 return t->gtime;
793
791 do { 794 do {
792 seq = read_seqbegin(&t->vtime_seqlock); 795 seq = read_seqbegin(&t->vtime_seqlock);
793 796
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cc16312046..8ec86abe0ea1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
64 raw_spin_unlock(&rt_b->rt_runtime_lock); 64 raw_spin_unlock(&rt_b->rt_runtime_lock);
65} 65}
66 66
67#ifdef CONFIG_SMP 67#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
68static void push_irq_work_func(struct irq_work *work); 68static void push_irq_work_func(struct irq_work *work);
69#endif 69#endif
70 70
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..b242775bf670 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1073 * We must ensure this doesn't happen until the switch is completely 1073 * We must ensure this doesn't happen until the switch is completely
1074 * finished. 1074 * finished.
1075 * 1075 *
1076 * In particular, the load of prev->state in finish_task_switch() must
1077 * happen before this.
1078 *
1076 * Pairs with the control dependency and rmb in try_to_wake_up(). 1079 * Pairs with the control dependency and rmb in try_to_wake_up().
1077 */ 1080 */
1078 smp_store_release(&prev->on_cpu, 0); 1081 smp_store_release(&prev->on_cpu, 0);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e02672d12..f10bd873e684 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
583 583
584__sched int bit_wait(struct wait_bit_key *word) 584__sched int bit_wait(struct wait_bit_key *word)
585{ 585{
586 if (signal_pending_state(current->state, current))
587 return 1;
588 schedule(); 586 schedule();
587 if (signal_pending(current))
588 return -EINTR;
589 return 0; 589 return 0;
590} 590}
591EXPORT_SYMBOL(bit_wait); 591EXPORT_SYMBOL(bit_wait);
592 592
593__sched int bit_wait_io(struct wait_bit_key *word) 593__sched int bit_wait_io(struct wait_bit_key *word)
594{ 594{
595 if (signal_pending_state(current->state, current))
596 return 1;
597 io_schedule(); 595 io_schedule();
596 if (signal_pending(current))
597 return -EINTR;
598 return 0; 598 return 0;
599} 599}
600EXPORT_SYMBOL(bit_wait_io); 600EXPORT_SYMBOL(bit_wait_io);
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
602__sched int bit_wait_timeout(struct wait_bit_key *word) 602__sched int bit_wait_timeout(struct wait_bit_key *word)
603{ 603{
604 unsigned long now = READ_ONCE(jiffies); 604 unsigned long now = READ_ONCE(jiffies);
605 if (signal_pending_state(current->state, current))
606 return 1;
607 if (time_after_eq(now, word->timeout)) 605 if (time_after_eq(now, word->timeout))
608 return -EAGAIN; 606 return -EAGAIN;
609 schedule_timeout(word->timeout - now); 607 schedule_timeout(word->timeout - now);
608 if (signal_pending(current))
609 return -EINTR;
610 return 0; 610 return 0;
611} 611}
612EXPORT_SYMBOL_GPL(bit_wait_timeout); 612EXPORT_SYMBOL_GPL(bit_wait_timeout);
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
614__sched int bit_wait_io_timeout(struct wait_bit_key *word) 614__sched int bit_wait_io_timeout(struct wait_bit_key *word)
615{ 615{
616 unsigned long now = READ_ONCE(jiffies); 616 unsigned long now = READ_ONCE(jiffies);
617 if (signal_pending_state(current->state, current))
618 return 1;
619 if (time_after_eq(now, word->timeout)) 617 if (time_after_eq(now, word->timeout))
620 return -EAGAIN; 618 return -EAGAIN;
621 io_schedule_timeout(word->timeout - now); 619 io_schedule_timeout(word->timeout - now);
620 if (signal_pending(current))
621 return -EINTR;
622 return 0; 622 return 0;
623} 623}
624EXPORT_SYMBOL_GPL(bit_wait_io_timeout); 624EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
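The bit_wait() changes flip the ordering: sleep first, then report a pending signal as -EINTR, instead of peeking at the task state up front and returning 1. As a user-space stub, the resulting control flow is (signal_pending_stub() and schedule_stub() are placeholders, not kernel calls):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool signal_pending_stub(void)
{
        return false;   /* a real implementation would check the task */
}

static void schedule_stub(void)
{
        /* a real implementation would sleep here */
}

static int bit_wait_sketch(void)
{
        schedule_stub();
        if (signal_pending_stub())
                return -EINTR;
        return 0;
}

int main(void)
{
        printf("bit_wait -> %d\n", bit_wait_sketch());
        return 0;
}

The timeout variants keep the same shape, with the -EAGAIN deadline check still performed before sleeping.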
diff --git a/kernel/signal.c b/kernel/signal.c
index c0b01fe24bbd..f3f1f7a972fd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
3503 3503
3504#endif 3504#endif
3505 3505
3506int sigsuspend(sigset_t *set) 3506static int sigsuspend(sigset_t *set)
3507{ 3507{
3508 current->saved_sigmask = current->blocked; 3508 current->saved_sigmask = current->blocked;
3509 set_current_blocked(set); 3509 set_current_blocked(set);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 75f1d05ea82d..9c6045a27ba3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
1887 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 1887 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1888} 1888}
1889 1889
1890static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1891{
1892 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1893 cpu_buffer->reader_page->read = 0;
1894}
1895
1896static void rb_inc_iter(struct ring_buffer_iter *iter) 1890static void rb_inc_iter(struct ring_buffer_iter *iter)
1897{ 1891{
1898 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1892 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2803 2797
2804 event = __rb_reserve_next(cpu_buffer, &info); 2798 event = __rb_reserve_next(cpu_buffer, &info);
2805 2799
2806 if (unlikely(PTR_ERR(event) == -EAGAIN)) 2800 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2801 if (info.add_timestamp)
2802 info.length -= RB_LEN_TIME_EXTEND;
2807 goto again; 2803 goto again;
2804 }
2808 2805
2809 if (!event) 2806 if (!event)
2810 goto out_fail; 2807 goto out_fail;
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3626 3623
3627 /* Finally update the reader page to the new head */ 3624 /* Finally update the reader page to the new head */
3628 cpu_buffer->reader_page = reader; 3625 cpu_buffer->reader_page = reader;
3629 rb_reset_reader_page(cpu_buffer); 3626 cpu_buffer->reader_page->read = 0;
3630 3627
3631 if (overwrite != cpu_buffer->last_overrun) { 3628 if (overwrite != cpu_buffer->last_overrun) {
3632 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 3629 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3636 goto again; 3633 goto again;
3637 3634
3638 out: 3635 out:
3636 /* Update the read_stamp on the first event */
3637 if (reader && reader->read == 0)
3638 cpu_buffer->read_stamp = reader->page->time_stamp;
3639
3639 arch_spin_unlock(&cpu_buffer->lock); 3640 arch_spin_unlock(&cpu_buffer->lock);
3640 local_irq_restore(flags); 3641 local_irq_restore(flags);
3641 3642
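The ring-buffer fix removes the eager rb_reset_reader_page() stamp update and instead refreshes read_stamp on the common exit path, and only when the reader page has not been consumed from yet; stamping a page the reader is already in the middle of would corrupt timestamp accounting. A toy model of the exit-path rule (simplified structures, not the trace API):

#include <stdint.h>
#include <stdio.h>

struct reader_page_m { uint64_t time_stamp; unsigned read; };
struct cpu_buf_m     { struct reader_page_m *reader; uint64_t read_stamp; };

/* mirrors the "out:" label: refresh only for a freshly started page */
static void finish_get_reader(struct cpu_buf_m *b)
{
        if (b->reader && b->reader->read == 0)
                b->read_stamp = b->reader->time_stamp;
}

int main(void)
{
        struct reader_page_m fresh  = { 1000, 0 };
        struct reader_page_m partly = { 2000, 5 };
        struct cpu_buf_m buf = { &fresh, 0 };

        finish_get_reader(&buf);        /* takes the new page's stamp */
        printf("stamp=%llu\n", (unsigned long long)buf.read_stamp);

        buf.reader = &partly;
        finish_get_reader(&buf);        /* partially-read page: unchanged */
        printf("stamp=%llu\n", (unsigned long long)buf.read_stamp);
        return 0;
}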
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6bbc5f652355..4f6ef6912e00 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -582,6 +582,12 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
582 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); 582 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
583 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); 583 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
584 584
585 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
586 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
587
588 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
589 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
590
585 list_for_each_entry(file, &tr->events, list) { 591 list_for_each_entry(file, &tr->events, list) {
586 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 592 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
587 } 593 }
@@ -1729,6 +1735,16 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1729 tr, INT_MAX); 1735 tr, INT_MAX);
1730 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, 1736 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1731 tr, 0); 1737 tr, 0);
1738
1739 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1740 tr, INT_MAX);
1741 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1742 tr, 0);
1743
1744 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1745 tr, INT_MAX);
1746 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1747 tr, 0);
1732 } 1748 }
1733 1749
1734 /* 1750 /*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c29ddebc8705..62fe06bb7d04 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
2009 /* 2009 /*
2010 * Be somewhat over-protective like KSM for now! 2010 * Be somewhat over-protective like KSM for now!
2011 */ 2011 */
2012 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 2012 if (*vm_flags & VM_NO_THP)
2013 return -EINVAL; 2013 return -EINVAL;
2014 *vm_flags &= ~VM_NOHUGEPAGE; 2014 *vm_flags &= ~VM_NOHUGEPAGE;
2015 *vm_flags |= VM_HUGEPAGE; 2015 *vm_flags |= VM_HUGEPAGE;
@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
2025 /* 2025 /*
2026 * Be somewhat over-protective like KSM for now! 2026 * Be somewhat over-protective like KSM for now!
2027 */ 2027 */
2028 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 2028 if (*vm_flags & VM_NO_THP)
2029 return -EINVAL; 2029 return -EINVAL;
2030 *vm_flags &= ~VM_HUGEPAGE; 2030 *vm_flags &= ~VM_HUGEPAGE;
2031 *vm_flags |= VM_NOHUGEPAGE; 2031 *vm_flags |= VM_NOHUGEPAGE;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index d41b21bce6a0..bc0a8d8b8f42 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -19,6 +19,7 @@
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/kmemleak.h>
22#include <linux/memblock.h> 23#include <linux/memblock.h>
23#include <linux/memory.h> 24#include <linux/memory.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
444 445
445 if (ret) { 446 if (ret) {
446 find_vm_area(addr)->flags |= VM_KASAN; 447 find_vm_area(addr)->flags |= VM_KASAN;
448 kmemleak_ignore(ret);
447 return 0; 449 return 0;
448 } 450 }
449 451
diff --git a/mm/memory.c b/mm/memory.c
index deb679c31f2a..c387430f06c3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3015 } else { 3015 } else {
3016 /* 3016 /*
3017 * The fault handler has no page to lock, so it holds 3017 * The fault handler has no page to lock, so it holds
3018 * i_mmap_lock for write to protect against truncate. 3018 * i_mmap_lock for read to protect against truncate.
3019 */ 3019 */
3020 i_mmap_unlock_write(vma->vm_file->f_mapping); 3020 i_mmap_unlock_read(vma->vm_file->f_mapping);
3021 } 3021 }
3022 goto uncharge_out; 3022 goto uncharge_out;
3023 } 3023 }
@@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3031 } else { 3031 } else {
3032 /* 3032 /*
3033 * The fault handler has no page to lock, so it holds 3033 * The fault handler has no page to lock, so it holds
3034 * i_mmap_lock for write to protect against truncate. 3034 * i_mmap_lock for read to protect against truncate.
3035 */ 3035 */
3036 i_mmap_unlock_write(vma->vm_file->f_mapping); 3036 i_mmap_unlock_read(vma->vm_file->f_mapping);
3037 } 3037 }
3038 return ret; 3038 return ret;
3039uncharge_out: 3039uncharge_out:
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2c90357c34ea..3e4d65445fa7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
1542 for (;;) { 1542 for (;;) {
1543 unsigned long now = jiffies; 1543 unsigned long now = jiffies;
1544 unsigned long dirty, thresh, bg_thresh; 1544 unsigned long dirty, thresh, bg_thresh;
1545 unsigned long m_dirty, m_thresh, m_bg_thresh; 1545 unsigned long m_dirty = 0; /* stop bogus uninit warnings */
1546 unsigned long m_thresh = 0;
1547 unsigned long m_bg_thresh = 0;
1546 1548
1547 /* 1549 /*
1548 * Unstable writes are a feature of certain networked 1550 * Unstable writes are a feature of certain networked
diff --git a/mm/slab.c b/mm/slab.c
index e0819fa96559..4765c97ce690 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3419} 3419}
3420EXPORT_SYMBOL(kmem_cache_free_bulk); 3420EXPORT_SYMBOL(kmem_cache_free_bulk);
3421 3421
3422bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3422int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3423 void **p) 3423 void **p)
3424{ 3424{
3425 return __kmem_cache_alloc_bulk(s, flags, size, p); 3425 return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slab.h b/mm/slab.h
index 27492eb678f7..7b6087197997 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
170 * may be allocated or freed using these operations. 170 * may be allocated or freed using these operations.
171 */ 171 */
172void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 172void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
173bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 173int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
174 174
175#ifdef CONFIG_MEMCG_KMEM 175#ifdef CONFIG_MEMCG_KMEM
176/* 176/*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d88e97c10a2e..3c6a86b4ec25 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
112 kmem_cache_free(s, p[i]); 112 kmem_cache_free(s, p[i]);
113} 113}
114 114
115bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, 115int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
116 void **p) 116 void **p)
117{ 117{
118 size_t i; 118 size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
121 void *x = p[i] = kmem_cache_alloc(s, flags); 121 void *x = p[i] = kmem_cache_alloc(s, flags);
122 if (!x) { 122 if (!x) {
123 __kmem_cache_free_bulk(s, i, p); 123 __kmem_cache_free_bulk(s, i, p);
124 return false; 124 return 0;
125 } 125 }
126 } 126 }
127 return true; 127 return i;
128} 128}
129 129
130#ifdef CONFIG_MEMCG_KMEM 130#ifdef CONFIG_MEMCG_KMEM
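Across slab.c, slob.c, slub.c and slab.h, kmem_cache_alloc_bulk() changes its return type from bool to int: 0 still means complete failure with nothing allocated, and a non-zero value is the number of objects provided. A hypothetical caller written against the new contract (alloc_bulk_stub() is a stand-in, not the slab API):

#include <stddef.h>
#include <stdio.h>

static int alloc_bulk_stub(size_t nr, void **p)
{
        static char storage[16][64];
        size_t i;

        if (nr > 16)
                return 0;       /* total failure: nothing was allocated */
        for (i = 0; i < nr; i++)
                p[i] = storage[i];
        return (int)nr;         /* count of objects actually provided */
}

int main(void)
{
        void *objs[8];
        int got = alloc_bulk_stub(8, objs);

        if (!got) {
                fprintf(stderr, "bulk allocation failed\n");
                return 1;
        }
        printf("got %d objects\n", got);
        return 0;
}

Returning a count leaves room for partial success later; the __kmem_cache_alloc_bulk() fallback above still frees everything and returns 0 on any failure.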
diff --git a/mm/slob.c b/mm/slob.c
index 0d7e5df74d1f..17e8f8cc7c53 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
617} 617}
618EXPORT_SYMBOL(kmem_cache_free_bulk); 618EXPORT_SYMBOL(kmem_cache_free_bulk);
619 619
620bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 620int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
621 void **p) 621 void **p)
622{ 622{
623 return __kmem_cache_alloc_bulk(s, flags, size, p); 623 return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slub.c b/mm/slub.c
index 7cb4bf9ae320..46997517406e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1065,11 +1065,15 @@ bad:
1065 return 0; 1065 return 0;
1066} 1066}
1067 1067
1068/* Supports checking bulk free of a constructed freelist */
1068static noinline struct kmem_cache_node *free_debug_processing( 1069static noinline struct kmem_cache_node *free_debug_processing(
1069 struct kmem_cache *s, struct page *page, void *object, 1070 struct kmem_cache *s, struct page *page,
1071 void *head, void *tail, int bulk_cnt,
1070 unsigned long addr, unsigned long *flags) 1072 unsigned long addr, unsigned long *flags)
1071{ 1073{
1072 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1075 void *object = head;
1076 int cnt = 0;
1073 1077
1074 spin_lock_irqsave(&n->list_lock, *flags); 1078 spin_lock_irqsave(&n->list_lock, *flags);
1075 slab_lock(page); 1079 slab_lock(page);
@@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing(
1077 if (!check_slab(s, page)) 1081 if (!check_slab(s, page))
1078 goto fail; 1082 goto fail;
1079 1083
1084next_object:
1085 cnt++;
1086
1080 if (!check_valid_pointer(s, page, object)) { 1087 if (!check_valid_pointer(s, page, object)) {
1081 slab_err(s, page, "Invalid object pointer 0x%p", object); 1088 slab_err(s, page, "Invalid object pointer 0x%p", object);
1082 goto fail; 1089 goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
1107 if (s->flags & SLAB_STORE_USER) 1114 if (s->flags & SLAB_STORE_USER)
1108 set_track(s, object, TRACK_FREE, addr); 1115 set_track(s, object, TRACK_FREE, addr);
1109 trace(s, page, object, 0); 1116 trace(s, page, object, 0);
1117 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1110 init_object(s, object, SLUB_RED_INACTIVE); 1118 init_object(s, object, SLUB_RED_INACTIVE);
1119
1120 /* Reached end of constructed freelist yet? */
1121 if (object != tail) {
1122 object = get_freepointer(s, object);
1123 goto next_object;
1124 }
1111out: 1125out:
1126 if (cnt != bulk_cnt)
1127 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1128 bulk_cnt, cnt);
1129
1112 slab_unlock(page); 1130 slab_unlock(page);
1113 /* 1131 /*
1114 * Keep node_lock to preserve integrity 1132 * Keep node_lock to preserve integrity
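With the head/tail/bulk_cnt interface above, free_debug_processing() validates a whole constructed freelist in one pass: follow the free pointers from head, check each object, and compare the node count against the caller's bulk_cnt. The counting walk itself reduces to something like this (plain pointers standing in for get_freepointer()):

#include <stdio.h>

struct node { struct node *next; };

/* returns 0 when the chain from head to tail holds exactly cnt nodes */
static int check_freelist(struct node *head, struct node *tail, int cnt)
{
        struct node *o = head;
        int seen = 0;

        for (;;) {
                seen++;
                if (o == tail)
                        break;
                o = o->next;
                if (!o)
                        break;  /* broken chain: tail never reached */
        }
        return seen == cnt ? 0 : -1;
}

int main(void)
{
        struct node c = { 0 }, b = { &c }, a = { &b };

        printf("ok=%d\n", check_freelist(&a, &c, 3) == 0);
        return 0;
}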
@@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
1204 1222
1205 return flags; 1223 return flags;
1206} 1224}
1207#else 1225#else /* !CONFIG_SLUB_DEBUG */
1208static inline void setup_object_debug(struct kmem_cache *s, 1226static inline void setup_object_debug(struct kmem_cache *s,
1209 struct page *page, void *object) {} 1227 struct page *page, void *object) {}
1210 1228
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
1212 struct page *page, void *object, unsigned long addr) { return 0; } 1230 struct page *page, void *object, unsigned long addr) { return 0; }
1213 1231
1214static inline struct kmem_cache_node *free_debug_processing( 1232static inline struct kmem_cache_node *free_debug_processing(
1215 struct kmem_cache *s, struct page *page, void *object, 1233 struct kmem_cache *s, struct page *page,
1234 void *head, void *tail, int bulk_cnt,
1216 unsigned long addr, unsigned long *flags) { return NULL; } 1235 unsigned long addr, unsigned long *flags) { return NULL; }
1217 1236
1218static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1237static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
1273 return memcg_kmem_get_cache(s, flags); 1292 return memcg_kmem_get_cache(s, flags);
1274} 1293}
1275 1294
1276static inline void slab_post_alloc_hook(struct kmem_cache *s, 1295static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1277 gfp_t flags, void *object) 1296 size_t size, void **p)
1278{ 1297{
1298 size_t i;
1299
1279 flags &= gfp_allowed_mask; 1300 flags &= gfp_allowed_mask;
1280 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 1301 for (i = 0; i < size; i++) {
1281 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); 1302 void *object = p[i];
1303
1304 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
1305 kmemleak_alloc_recursive(object, s->object_size, 1,
1306 s->flags, flags);
1307 kasan_slab_alloc(s, object);
1308 }
1282 memcg_kmem_put_cache(s); 1309 memcg_kmem_put_cache(s);
1283 kasan_slab_alloc(s, object);
1284} 1310}
1285 1311
1286static inline void slab_free_hook(struct kmem_cache *s, void *x) 1312static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
1308 kasan_slab_free(s, x); 1334 kasan_slab_free(s, x);
1309} 1335}
1310 1336
1337static inline void slab_free_freelist_hook(struct kmem_cache *s,
1338 void *head, void *tail)
1339{
1340/*
1341 * The compiler cannot detect that this function can be removed if slab_free_hook()
1342 * evaluates to nothing. Thus, catch all relevant config debug options here.
1343 */
1344#if defined(CONFIG_KMEMCHECK) || \
1345 defined(CONFIG_LOCKDEP) || \
1346 defined(CONFIG_DEBUG_KMEMLEAK) || \
1347 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1348 defined(CONFIG_KASAN)
1349
1350 void *object = head;
1351 void *tail_obj = tail ? : head;
1352
1353 do {
1354 slab_free_hook(s, object);
1355 } while ((object != tail_obj) &&
1356 (object = get_freepointer(s, object)));
1357#endif
1358}
1359
1311static void setup_object(struct kmem_cache *s, struct page *page, 1360static void setup_object(struct kmem_cache *s, struct page *page,
1312 void *object) 1361 void *object)
1313{ 1362{
@@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2295 * And if we were unable to get a new slab from the partial slab lists then 2344 * And if we were unable to get a new slab from the partial slab lists then
2296 * we need to allocate a new slab. This is the slowest path since it involves 2345 * we need to allocate a new slab. This is the slowest path since it involves
2297 * a call to the page allocator and the setup of a new slab. 2346 * a call to the page allocator and the setup of a new slab.
2347 *
2348 * Version of __slab_alloc to use when we know that interrupts are
2349 * already disabled (which is the case for bulk allocation).
2298 */ 2350 */
2299static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2351static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2300 unsigned long addr, struct kmem_cache_cpu *c) 2352 unsigned long addr, struct kmem_cache_cpu *c)
2301{ 2353{
2302 void *freelist; 2354 void *freelist;
2303 struct page *page; 2355 struct page *page;
2304 unsigned long flags;
2305
2306 local_irq_save(flags);
2307#ifdef CONFIG_PREEMPT
2308 /*
2309 * We may have been preempted and rescheduled on a different
2310 * cpu before disabling interrupts. Need to reload cpu area
2311 * pointer.
2312 */
2313 c = this_cpu_ptr(s->cpu_slab);
2314#endif
2315 2356
2316 page = c->page; 2357 page = c->page;
2317 if (!page) 2358 if (!page)
@@ -2369,7 +2410,6 @@ load_freelist:
2369 VM_BUG_ON(!c->page->frozen); 2410 VM_BUG_ON(!c->page->frozen);
2370 c->freelist = get_freepointer(s, freelist); 2411 c->freelist = get_freepointer(s, freelist);
2371 c->tid = next_tid(c->tid); 2412 c->tid = next_tid(c->tid);
2372 local_irq_restore(flags);
2373 return freelist; 2413 return freelist;
2374 2414
2375new_slab: 2415new_slab:
@@ -2386,7 +2426,6 @@ new_slab:
2386 2426
2387 if (unlikely(!freelist)) { 2427 if (unlikely(!freelist)) {
2388 slab_out_of_memory(s, gfpflags, node); 2428 slab_out_of_memory(s, gfpflags, node);
2389 local_irq_restore(flags);
2390 return NULL; 2429 return NULL;
2391 } 2430 }
2392 2431
@@ -2402,11 +2441,35 @@ new_slab:
2402 deactivate_slab(s, page, get_freepointer(s, freelist)); 2441 deactivate_slab(s, page, get_freepointer(s, freelist));
2403 c->page = NULL; 2442 c->page = NULL;
2404 c->freelist = NULL; 2443 c->freelist = NULL;
2405 local_irq_restore(flags);
2406 return freelist; 2444 return freelist;
2407} 2445}
2408 2446
2409/* 2447/*
2448 * Another one that disables interrupts and compensates for possible
2449 * cpu changes by refetching the per cpu area pointer.
2450 */
2451static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2452 unsigned long addr, struct kmem_cache_cpu *c)
2453{
2454 void *p;
2455 unsigned long flags;
2456
2457 local_irq_save(flags);
2458#ifdef CONFIG_PREEMPT
2459 /*
2460 * We may have been preempted and rescheduled on a different
2461 * cpu before disabling interrupts. Need to reload cpu area
2462 * pointer.
2463 */
2464 c = this_cpu_ptr(s->cpu_slab);
2465#endif
2466
2467 p = ___slab_alloc(s, gfpflags, node, addr, c);
2468 local_irq_restore(flags);
2469 return p;
2470}
2471
2472/*
2410 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2473 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2411 * have the fastpath folded into their functions. So no function call 2474 * have the fastpath folded into their functions. So no function call
2412 * overhead for requests that can be satisfied on the fastpath. 2475 * overhead for requests that can be satisfied on the fastpath.
@@ -2419,7 +2482,7 @@ new_slab:
2419static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2482static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2420 gfp_t gfpflags, int node, unsigned long addr) 2483 gfp_t gfpflags, int node, unsigned long addr)
2421{ 2484{
2422 void **object; 2485 void *object;
2423 struct kmem_cache_cpu *c; 2486 struct kmem_cache_cpu *c;
2424 struct page *page; 2487 struct page *page;
2425 unsigned long tid; 2488 unsigned long tid;
@@ -2498,7 +2561,7 @@ redo:
2498 if (unlikely(gfpflags & __GFP_ZERO) && object) 2561 if (unlikely(gfpflags & __GFP_ZERO) && object)
2499 memset(object, 0, s->object_size); 2562 memset(object, 0, s->object_size);
2500 2563
2501 slab_post_alloc_hook(s, gfpflags, object); 2564 slab_post_alloc_hook(s, gfpflags, 1, &object);
2502 2565
2503 return object; 2566 return object;
2504} 2567}
@@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2569 * handling required then we can return immediately. 2632 * handling required then we can return immediately.
2570 */ 2633 */
2571static void __slab_free(struct kmem_cache *s, struct page *page, 2634static void __slab_free(struct kmem_cache *s, struct page *page,
2572 void *x, unsigned long addr) 2635 void *head, void *tail, int cnt,
2636 unsigned long addr)
2637
2573{ 2638{
2574 void *prior; 2639 void *prior;
2575 void **object = (void *)x;
2576 int was_frozen; 2640 int was_frozen;
2577 struct page new; 2641 struct page new;
2578 unsigned long counters; 2642 unsigned long counters;
@@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2582 stat(s, FREE_SLOWPATH); 2646 stat(s, FREE_SLOWPATH);
2583 2647
2584 if (kmem_cache_debug(s) && 2648 if (kmem_cache_debug(s) &&
2585 !(n = free_debug_processing(s, page, x, addr, &flags))) 2649 !(n = free_debug_processing(s, page, head, tail, cnt,
2650 addr, &flags)))
2586 return; 2651 return;
2587 2652
2588 do { 2653 do {
@@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2592 } 2657 }
2593 prior = page->freelist; 2658 prior = page->freelist;
2594 counters = page->counters; 2659 counters = page->counters;
2595 set_freepointer(s, object, prior); 2660 set_freepointer(s, tail, prior);
2596 new.counters = counters; 2661 new.counters = counters;
2597 was_frozen = new.frozen; 2662 was_frozen = new.frozen;
2598 new.inuse--; 2663 new.inuse -= cnt;
2599 if ((!new.inuse || !prior) && !was_frozen) { 2664 if ((!new.inuse || !prior) && !was_frozen) {
2600 2665
2601 if (kmem_cache_has_cpu_partial(s) && !prior) { 2666 if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2626 2691
2627 } while (!cmpxchg_double_slab(s, page, 2692 } while (!cmpxchg_double_slab(s, page,
2628 prior, counters, 2693 prior, counters,
2629 object, new.counters, 2694 head, new.counters,
2630 "__slab_free")); 2695 "__slab_free"));
2631 2696
2632 if (likely(!n)) { 2697 if (likely(!n)) {
@@ -2691,15 +2756,20 @@ slab_empty:
2691 * 2756 *
2692 * If fastpath is not possible then fall back to __slab_free where we deal 2757 * If fastpath is not possible then fall back to __slab_free where we deal
2693 * with all sorts of special processing. 2758 * with all sorts of special processing.
2759 *
2760 * Bulk free of a freelist with several objects (all pointing to the
2761 * same page) is possible by specifying head and tail pointers, plus an object
2762 * count (cnt). Bulk free is indicated by the tail pointer being set.
2694 */ 2763 */
2695static __always_inline void slab_free(struct kmem_cache *s, 2764static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2696 struct page *page, void *x, unsigned long addr) 2765 void *head, void *tail, int cnt,
2766 unsigned long addr)
2697{ 2767{
2698 void **object = (void *)x; 2768 void *tail_obj = tail ? : head;
2699 struct kmem_cache_cpu *c; 2769 struct kmem_cache_cpu *c;
2700 unsigned long tid; 2770 unsigned long tid;
2701 2771
2702 slab_free_hook(s, x); 2772 slab_free_freelist_hook(s, head, tail);
2703 2773
2704redo: 2774redo:
2705 /* 2775 /*
@@ -2718,19 +2788,19 @@ redo:
2718 barrier(); 2788 barrier();
2719 2789
2720 if (likely(page == c->page)) { 2790 if (likely(page == c->page)) {
2721 set_freepointer(s, object, c->freelist); 2791 set_freepointer(s, tail_obj, c->freelist);
2722 2792
2723 if (unlikely(!this_cpu_cmpxchg_double( 2793 if (unlikely(!this_cpu_cmpxchg_double(
2724 s->cpu_slab->freelist, s->cpu_slab->tid, 2794 s->cpu_slab->freelist, s->cpu_slab->tid,
2725 c->freelist, tid, 2795 c->freelist, tid,
2726 object, next_tid(tid)))) { 2796 head, next_tid(tid)))) {
2727 2797
2728 note_cmpxchg_failure("slab_free", s, tid); 2798 note_cmpxchg_failure("slab_free", s, tid);
2729 goto redo; 2799 goto redo;
2730 } 2800 }
2731 stat(s, FREE_FASTPATH); 2801 stat(s, FREE_FASTPATH);
2732 } else 2802 } else
2733 __slab_free(s, page, x, addr); 2803 __slab_free(s, page, head, tail_obj, cnt, addr);
2734 2804
2735} 2805}
2736 2806
@@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
2739 s = cache_from_obj(s, x); 2809 s = cache_from_obj(s, x);
2740 if (!s) 2810 if (!s)
2741 return; 2811 return;
2742 slab_free(s, virt_to_head_page(x), x, _RET_IP_); 2812 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
2743 trace_kmem_cache_free(_RET_IP_, x); 2813 trace_kmem_cache_free(_RET_IP_, x);
2744} 2814}
2745EXPORT_SYMBOL(kmem_cache_free); 2815EXPORT_SYMBOL(kmem_cache_free);
2746 2816
2747/* Note that interrupts must be enabled when calling this function. */ 2817struct detached_freelist {
2748void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
2749{
2750 struct kmem_cache_cpu *c;
2751 struct page *page; 2818 struct page *page;
2752 int i; 2819 void *tail;
2820 void *freelist;
2821 int cnt;
2822};
2753 2823
2754 local_irq_disable(); 2824/*
2755 c = this_cpu_ptr(s->cpu_slab); 2825 * This function progressively scans the array with free objects (with
2826 * a limited look ahead) and extracts objects belonging to the same
2827 * page. It builds a detached freelist directly within the given
2828 * page/objects. This can happen without any need for
2829 * synchronization, because the objects are owned by the running process.
2830 * The freelist is built up as a singly linked list in the objects.
2831 * The idea is that this detached freelist can then be bulk
2832 * transferred to the real freelist(s) while requiring only a single
2833 * synchronization primitive. Look ahead in the array is limited due
2834 * to performance reasons.
2835 */
2836static int build_detached_freelist(struct kmem_cache *s, size_t size,
2837 void **p, struct detached_freelist *df)
2838{
2839 size_t first_skipped_index = 0;
2840 int lookahead = 3;
2841 void *object;
2756 2842
2757 for (i = 0; i < size; i++) { 2843 /* Always re-init detached_freelist */
2758 void *object = p[i]; 2844 df->page = NULL;
2759 2845
2760 BUG_ON(!object); 2846 do {
2761 /* kmem cache debug support */ 2847 object = p[--size];
2762 s = cache_from_obj(s, object); 2848 } while (!object && size);
2763 if (unlikely(!s))
2764 goto exit;
2765 slab_free_hook(s, object);
2766 2849
2767 page = virt_to_head_page(object); 2850 if (!object)
2851 return 0;
2768 2852
2769 if (c->page == page) { 2853 /* Start new detached freelist */
2770 /* Fastpath: local CPU free */ 2854 set_freepointer(s, object, NULL);
2771 set_freepointer(s, object, c->freelist); 2855 df->page = virt_to_head_page(object);
2772 c->freelist = object; 2856 df->tail = object;
2773 } else { 2857 df->freelist = object;
2774 c->tid = next_tid(c->tid); 2858 p[size] = NULL; /* mark object processed */
2775 local_irq_enable(); 2859 df->cnt = 1;
2776 /* Slowpath: overhead locked cmpxchg_double_slab */ 2860
2777 __slab_free(s, page, object, _RET_IP_); 2861 while (size) {
2778 local_irq_disable(); 2862 object = p[--size];
2779 c = this_cpu_ptr(s->cpu_slab); 2863 if (!object)
2864 continue; /* Skip processed objects */
2865
2866 /* df->page is always set at this point */
2867 if (df->page == virt_to_head_page(object)) {
2868 /* Opportunity to build freelist */
2869 set_freepointer(s, object, df->freelist);
2870 df->freelist = object;
2871 df->cnt++;
2872 p[size] = NULL; /* mark object processed */
2873
2874 continue;
2780 } 2875 }
2876
2877 /* Limit look ahead search */
2878 if (!--lookahead)
2879 break;
2880
2881 if (!first_skipped_index)
2882 first_skipped_index = size + 1;
2781 } 2883 }
2782exit: 2884
2783 c->tid = next_tid(c->tid); 2885 return first_skipped_index;
2784 local_irq_enable(); 2886}
2887
2888
2889/* Note that interrupts must be enabled when calling this function. */
2890void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
2891{
2892 if (WARN_ON(!size))
2893 return;
2894
2895 do {
2896 struct detached_freelist df;
2897 struct kmem_cache *s;
2898
2899 /* Support for memcg */
2900 s = cache_from_obj(orig_s, p[size - 1]);
2901
2902 size = build_detached_freelist(s, size, p, &df);
2903 if (unlikely(!df.page))
2904 continue;
2905
2906 slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
2907 } while (likely(size));
2785} 2908}
2786EXPORT_SYMBOL(kmem_cache_free_bulk); 2909EXPORT_SYMBOL(kmem_cache_free_bulk);
2787 2910
2788/* Note that interrupts must be enabled when calling this function. */ 2911/* Note that interrupts must be enabled when calling this function. */
2789bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 2912int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2790 void **p) 2913 void **p)
2791{ 2914{
2792 struct kmem_cache_cpu *c; 2915 struct kmem_cache_cpu *c;
2793 int i; 2916 int i;
2794 2917
2918 /* memcg and kmem_cache debug support */
2919 s = slab_pre_alloc_hook(s, flags);
2920 if (unlikely(!s))
2921 return false;
2795 /* 2922 /*
2796 * Drain objects in the per cpu slab, while disabling local 2923 * Drain objects in the per cpu slab, while disabling local
2797 * IRQs, which protects against PREEMPT and interrupts 2924 * IRQs, which protects against PREEMPT and interrupts
@@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2804 void *object = c->freelist; 2931 void *object = c->freelist;
2805 2932
2806 if (unlikely(!object)) { 2933 if (unlikely(!object)) {
2807 local_irq_enable();
2808 /* 2934 /*
2809 * Invoking slow path likely has the side-effect 2935 * Invoking slow path likely has the side-effect
2810 * of re-populating per CPU c->freelist 2936 * of re-populating per CPU c->freelist
2811 */ 2937 */
2812 p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, 2938 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
2813 _RET_IP_, c); 2939 _RET_IP_, c);
2814 if (unlikely(!p[i])) { 2940 if (unlikely(!p[i]))
2815 __kmem_cache_free_bulk(s, i, p); 2941 goto error;
2816 return false; 2942
2817 }
2818 local_irq_disable();
2819 c = this_cpu_ptr(s->cpu_slab); 2943 c = this_cpu_ptr(s->cpu_slab);
2820 continue; /* goto for-loop */ 2944 continue; /* goto for-loop */
2821 } 2945 }
2822
2823 /* kmem_cache debug support */
2824 s = slab_pre_alloc_hook(s, flags);
2825 if (unlikely(!s)) {
2826 __kmem_cache_free_bulk(s, i, p);
2827 c->tid = next_tid(c->tid);
2828 local_irq_enable();
2829 return false;
2830 }
2831
2832 c->freelist = get_freepointer(s, object); 2946 c->freelist = get_freepointer(s, object);
2833 p[i] = object; 2947 p[i] = object;
2834
2835 /* kmem_cache debug support */
2836 slab_post_alloc_hook(s, flags, object);
2837 } 2948 }
2838 c->tid = next_tid(c->tid); 2949 c->tid = next_tid(c->tid);
2839 local_irq_enable(); 2950 local_irq_enable();
@@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2846 memset(p[j], 0, s->object_size); 2957 memset(p[j], 0, s->object_size);
2847 } 2958 }
2848 2959
2849 return true; 2960 /* memcg and kmem_cache debug support */
2961 slab_post_alloc_hook(s, flags, size, p);
2962 return i;
2963error:
2964 local_irq_enable();
2965 slab_post_alloc_hook(s, flags, i, p);
2966 __kmem_cache_free_bulk(s, i, p);
2967 return 0;
2850} 2968}
2851EXPORT_SYMBOL(kmem_cache_alloc_bulk); 2969EXPORT_SYMBOL(kmem_cache_alloc_bulk);
2852 2970
@@ -3511,7 +3629,7 @@ void kfree(const void *x)
3511 __free_kmem_pages(page, compound_order(page)); 3629 __free_kmem_pages(page, compound_order(page));
3512 return; 3630 return;
3513 } 3631 }
3514 slab_free(page->slab_cache, page, object, _RET_IP_); 3632 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3515} 3633}
3516EXPORT_SYMBOL(kfree); 3634EXPORT_SYMBOL(kfree);
3517 3635
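The centerpiece of the slub.c changes is build_detached_freelist(): scan p[] from the end, link together the objects that share a page (with the look-ahead bounded at 3), NULL out processed slots, and let the caller hand the finished list to slab_free() with a single synchronization. A compilable user-space model of the scan, with the page reduced to an integer id and every name illustrative:

#include <stddef.h>
#include <stdio.h>

struct obj_m { struct obj_m *next; int page; };

struct detached_m { struct obj_m *head, *tail; int page, cnt; };

static size_t build_detached(struct obj_m **p, size_t size,
                             struct detached_m *df)
{
        size_t first_skipped = 0;
        int lookahead = 3;
        struct obj_m *o;

        df->head = NULL;                /* always re-init */
        do {
                o = p[--size];
        } while (!o && size);
        if (!o)
                return 0;

        /* start a new detached freelist on this object's page */
        o->next = NULL;
        df->page = o->page;
        df->head = df->tail = o;
        df->cnt = 1;
        p[size] = NULL;                 /* mark object processed */

        while (size) {
                o = p[--size];
                if (!o)
                        continue;       /* skip processed slots */

                if (o->page == df->page) {
                        /* same page: extend the list at its head */
                        o->next = df->head;
                        df->head = o;
                        df->cnt++;
                        p[size] = NULL;
                        continue;
                }

                if (!--lookahead)       /* bound the search cost */
                        break;
                if (!first_skipped)
                        first_skipped = size + 1;
        }
        return first_skipped;           /* where the caller resumes */
}

int main(void)
{
        struct obj_m a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 1 };
        struct obj_m *arr[] = { &a, &b, &c };
        struct detached_m df;
        size_t resume = build_detached(arr, 3, &df);

        printf("page %d: %d objects, resume at size %zu\n",
               df.page, df.cnt, resume);
        return 0;
}

kmem_cache_free_bulk() then simply loops: build a detached freelist, free it with one slab_free() call, and repeat until the remaining size reaches zero.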
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d04563480c94..8e3c9c5a3042 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
1443 vmap_debug_free_range(va->va_start, va->va_end); 1443 vmap_debug_free_range(va->va_start, va->va_end);
1444 kasan_free_shadow(vm); 1444 kasan_free_shadow(vm);
1445 free_unmap_vmap_area(va); 1445 free_unmap_vmap_area(va);
1446 vm->size -= PAGE_SIZE;
1447 1446
1448 return vm; 1447 return vm;
1449 } 1448 }
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
1468 return; 1467 return;
1469 } 1468 }
1470 1469
1471 debug_check_no_locks_freed(addr, area->size); 1470 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1472 debug_check_no_obj_freed(addr, area->size); 1471 debug_check_no_obj_freed(addr, get_vm_area_size(area));
1473 1472
1474 if (deallocate_pages) { 1473 if (deallocate_pages) {
1475 int i; 1474 int i;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 496b27588493..e2ed69850489 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -30,7 +30,9 @@ bool vlan_do_receive(struct sk_buff **skbp)
30 skb->pkt_type = PACKET_HOST; 30 skb->pkt_type = PACKET_HOST;
31 } 31 }
32 32
33 if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { 33 if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
34 !netif_is_macvlan_port(vlan_dev) &&
35 !netif_is_bridge_port(vlan_dev)) {
34 unsigned int offset = skb->data - skb_mac_header(skb); 36 unsigned int offset = skb->data - skb_mac_header(skb);
35 37
36 /* 38 /*
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a3bffd1ec2b4..70306cc9d814 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
271 if (signal_pending(current) || !timeo) 271 if (signal_pending(current) || !timeo)
272 break; 272 break;
273 273
274 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 274 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
275 release_sock(sk); 275 release_sock(sk);
276 timeo = schedule_timeout(timeo); 276 timeo = schedule_timeout(timeo);
277 lock_sock(sk); 277 lock_sock(sk);
278 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 278 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
279 } 279 }
280 280
281 __set_current_state(TASK_RUNNING); 281 __set_current_state(TASK_RUNNING);
@@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
441 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) 441 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
442 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 442 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
443 else 443 else
444 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 444 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
445 445
446 return mask; 446 return mask;
447} 447}
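The Bluetooth hunks above (and the caif and datagram conversions further down) route every socket async-flag update through sk_set_bit()/sk_clear_bit(), renaming the SOCK_ASYNC_* flags to SOCKWQ_ASYNC_* at the same time; where the flag word actually lives becomes a private detail of the helpers rather than of each protocol. Simplified stand-in types, not the socket API:

#include <stdio.h>

struct socket_m { unsigned long flags; };
struct sock_m   { struct socket_m *sk_socket; };

/* one chokepoint for flag updates: relocating the flag word later
 * means editing these two helpers, not every protocol */
static void sk_set_bit_m(int nr, struct sock_m *sk)
{
        sk->sk_socket->flags |= (1UL << nr);
}

static void sk_clear_bit_m(int nr, struct sock_m *sk)
{
        sk->sk_socket->flags &= ~(1UL << nr);
}

int main(void)
{
        struct socket_m s = { 0 };
        struct sock_m sk = { &s };

        sk_set_bit_m(1, &sk);
        printf("flags=%#lx\n", s.flags);
        sk_clear_bit_m(1, &sk);
        printf("flags=%#lx\n", s.flags);
        return 0;
}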
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c91353841e40..ffed8a1d4f27 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan)
3027 3027
3028 BT_DBG("chan %p", chan); 3028 BT_DBG("chan %p", chan);
3029 3029
3030 /* No need to call l2cap_chan_hold() here since we already own
3031 * the reference taken in smp_new_conn_cb(). This is just the
3032 * first time that we tie it to a specific pointer. The code in
3033 * l2cap_core.c ensures that there's no risk this function won't
3034 * get called if smp_new_conn_cb was previously called.
3035 */
3030 conn->smp = chan; 3036 conn->smp = chan;
3031 l2cap_chan_hold(chan);
3032 3037
3033 if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) 3038 if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
3034 bredr_pairing(chan); 3039 bredr_pairing(chan);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index f7e8dee64fc8..5f3f64553179 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -48,7 +48,7 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
48 48
49 p->state = state; 49 p->state = state;
50 err = switchdev_port_attr_set(p->dev, &attr); 50 err = switchdev_port_attr_set(p->dev, &attr);
51 if (err) 51 if (err && err != -EOPNOTSUPP)
52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n", 52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
53 (unsigned int) p->port_no, p->dev->name); 53 (unsigned int) p->port_no, p->dev->name);
54} 54}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index fa53d7a89f48..5396ff08af32 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -50,7 +50,7 @@ void br_init_port(struct net_bridge_port *p)
50 p->config_pending = 0; 50 p->config_pending = 0;
51 51
52 err = switchdev_port_attr_set(p->dev, &attr); 52 err = switchdev_port_attr_set(p->dev, &attr);
53 if (err) 53 if (err && err != -EOPNOTSUPP)
54 netdev_err(p->dev, "failed to set HW ageing time\n"); 54 netdev_err(p->dev, "failed to set HW ageing time\n");
55} 55}
56 56
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index cc858919108e..aa209b1066c9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
323 !timeo) 323 !timeo)
324 break; 324 break;
325 325
326 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 326 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
327 release_sock(sk); 327 release_sock(sk);
328 timeo = schedule_timeout(timeo); 328 timeo = schedule_timeout(timeo);
329 lock_sock(sk); 329 lock_sock(sk);
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
331 if (sock_flag(sk, SOCK_DEAD)) 331 if (sock_flag(sk, SOCK_DEAD))
332 break; 332 break;
333 333
334 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 334 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
335 } 335 }
336 336
337 finish_wait(sk_sleep(sk), &wait); 337 finish_wait(sk_sleep(sk), &wait);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 617088aee21d..d62af69ad844 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
785 if (sock_writeable(sk)) 785 if (sock_writeable(sk))
786 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 786 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
787 else 787 else
788 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 788 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
789 789
790 return mask; 790 return mask;
791} 791}
diff --git a/net/core/dev.c b/net/core/dev.c
index ab9b8d0d115e..ae00b894e675 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2403,17 +2403,20 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
2403{ 2403{
2404 static const netdev_features_t null_features = 0; 2404 static const netdev_features_t null_features = 0;
2405 struct net_device *dev = skb->dev; 2405 struct net_device *dev = skb->dev;
2406 const char *driver = ""; 2406 const char *name = "";
2407 2407
2408 if (!net_ratelimit()) 2408 if (!net_ratelimit())
2409 return; 2409 return;
2410 2410
2411 if (dev && dev->dev.parent) 2411 if (dev) {
2412 driver = dev_driver_string(dev->dev.parent); 2412 if (dev->dev.parent)
2413 2413 name = dev_driver_string(dev->dev.parent);
2414 else
2415 name = netdev_name(dev);
2416 }
2414 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2417 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2415 "gso_type=%d ip_summed=%d\n", 2418 "gso_type=%d ip_summed=%d\n",
2416 driver, dev ? &dev->features : &null_features, 2419 name, dev ? &dev->features : &null_features,
2417 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2420 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2418 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2421 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2419 skb_shinfo(skb)->gso_type, skb->ip_summed); 2422 skb_shinfo(skb)->gso_type, skb->ip_summed);
@@ -6426,11 +6429,16 @@ int __netdev_update_features(struct net_device *dev)
6426 6429
6427 if (dev->netdev_ops->ndo_set_features) 6430 if (dev->netdev_ops->ndo_set_features)
6428 err = dev->netdev_ops->ndo_set_features(dev, features); 6431 err = dev->netdev_ops->ndo_set_features(dev, features);
6432 else
6433 err = 0;
6429 6434
6430 if (unlikely(err < 0)) { 6435 if (unlikely(err < 0)) {
6431 netdev_err(dev, 6436 netdev_err(dev,
6432 "set_features() failed (%d); wanted %pNF, left %pNF\n", 6437 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6433 err, &features, &dev->features); 6438 err, &features, &dev->features);
6439 /* return non-0 since some features might have changed and
6440 * it's better to fire a spurious notification than miss it
6441 */
6434 return -1; 6442 return -1;
6435 } 6443 }
6436 6444
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1aa8437ed6c4..f18ae91b652e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -857,7 +857,7 @@ static void neigh_probe(struct neighbour *neigh)
857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); 857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
858 /* keep skb alive even if arp_queue overflows */ 858 /* keep skb alive even if arp_queue overflows */
859 if (skb) 859 if (skb)
860 skb = skb_copy(skb, GFP_ATOMIC); 860 skb = skb_clone(skb, GFP_ATOMIC);
861 write_unlock(&neigh->lock); 861 write_unlock(&neigh->lock);
862 neigh->ops->solicit(neigh, skb); 862 neigh->ops->solicit(neigh, skb);
863 atomic_inc(&neigh->probes); 863 atomic_inc(&neigh->probes);
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2215 ndm->ndm_pad2 = 0; 2215 ndm->ndm_pad2 = 0;
2216 ndm->ndm_flags = pn->flags | NTF_PROXY; 2216 ndm->ndm_flags = pn->flags | NTF_PROXY;
2217 ndm->ndm_type = RTN_UNICAST; 2217 ndm->ndm_type = RTN_UNICAST;
2218 ndm->ndm_ifindex = pn->dev->ifindex; 2218 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219 ndm->ndm_state = NUD_NONE; 2219 ndm->ndm_state = NUD_NONE;
2220 2220
2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2333 if (h > s_h) 2333 if (h > s_h)
2334 s_idx = 0; 2334 s_idx = 0;
2335 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2335 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2336 if (dev_net(n->dev) != net) 2336 if (pneigh_net(n) != net)
2337 continue; 2337 continue;
2338 if (idx < s_idx) 2338 if (idx < s_idx)
2339 goto next; 2339 goto next;
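
Three small neighbour.c fixes: neigh_probe() switches from skb_copy() to skb_clone(), since keeping the queued packet alive only needs another reference to the same payload rather than a duplicate of it (the solicit path just reads the skb); pneigh_fill_info() tolerates proxy entries with a NULL device; and pneigh_dump_table() compares namespaces via pneigh_net(), which also works for device-less entries. A toy refcount model of the clone-versus-copy distinction (illustrative types, not the kernel's sk_buff):

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        char   *data;      /* payload, possibly shared */
        int    *refcnt;    /* how many owners of data */
        size_t  len;
    };

    /* clone: new owner of the same payload, O(1) */
    static struct buf buf_clone(struct buf b)
    {
        ++*b.refcnt;
        return b;
    }

    /* copy: brand new payload, O(len) */
    static struct buf buf_copy(struct buf b)
    {
        struct buf c = { malloc(b.len), malloc(sizeof(int)), b.len };

        memcpy(c.data, b.data, b.len);
        *c.refcnt = 1;
        return c;
    }
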
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6441f47b1a8f..2e4df84c34a1 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
56 kfree(css_cls_state(css)); 56 kfree(css_cls_state(css));
57} 57}
58 58
59static int update_classid(const void *v, struct file *file, unsigned n) 59static int update_classid_sock(const void *v, struct file *file, unsigned n)
60{ 60{
61 int err; 61 int err;
62 struct socket *sock = sock_from_file(file, &err); 62 struct socket *sock = sock_from_file(file, &err);
@@ -67,18 +67,25 @@ static int update_classid(const void *v, struct file *file, unsigned n)
67 return 0; 67 return 0;
68} 68}
69 69
70static void cgrp_attach(struct cgroup_subsys_state *css, 70static void update_classid(struct cgroup_subsys_state *css, void *v)
71 struct cgroup_taskset *tset)
72{ 71{
73 struct cgroup_cls_state *cs = css_cls_state(css); 72 struct css_task_iter it;
74 void *v = (void *)(unsigned long)cs->classid;
75 struct task_struct *p; 73 struct task_struct *p;
76 74
77 cgroup_taskset_for_each(p, tset) { 75 css_task_iter_start(css, &it);
76 while ((p = css_task_iter_next(&it))) {
78 task_lock(p); 77 task_lock(p);
79 iterate_fd(p->files, 0, update_classid, v); 78 iterate_fd(p->files, 0, update_classid_sock, v);
80 task_unlock(p); 79 task_unlock(p);
81 } 80 }
81 css_task_iter_end(&it);
82}
83
84static void cgrp_attach(struct cgroup_subsys_state *css,
85 struct cgroup_taskset *tset)
86{
87 update_classid(css,
88 (void *)(unsigned long)css_cls_state(css)->classid);
82} 89}
83 90
84static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) 91static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -89,8 +96,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
89static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, 96static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
90 u64 value) 97 u64 value)
91{ 98{
92 css_cls_state(css)->classid = (u32) value; 99 struct cgroup_cls_state *cs = css_cls_state(css);
100
101 cs->classid = (u32)value;
93 102
103 update_classid(css, (void *)(unsigned long)cs->classid);
94 return 0; 104 return 0;
95} 105}
96 106
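
The netclassid rework changes when classids take effect: the per-fd updater becomes update_classid_sock(), and a new update_classid() walks every task in the cgroup via css_task_iter_start()/css_task_iter_next()/css_task_iter_end(). Because write_classid() now runs that walker too, writing net_cls.classid retags sockets that are already open, instead of only those belonging to tasks attached afterwards. A compact userspace model of the iterate_fd()-style callback walk it relies on (hypothetical table type; the stop-on-nonzero contract mirrors fs/file.c):

    #include <stddef.h>

    struct table { void *slot[16]; };

    typedef int (*slot_cb)(const void *ctx, void *entry, unsigned n);

    /* visit every populated slot; a non-zero return ends the walk early */
    static int iterate_slots(struct table *t, const void *ctx, slot_cb cb)
    {
        for (unsigned n = 0; n < 16; n++) {
            int err;

            if (!t->slot[n])
                continue;
            err = cb(ctx, t->slot[n], n);
            if (err)
                return err;
        }
        return 0;
    }
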
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 504bd17b7456..34ba7a08876d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1045,15 +1045,156 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1045 return 0; 1045 return 0;
1046} 1046}
1047 1047
1048static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1049 struct net_device *dev)
1050{
1051 const struct rtnl_link_stats64 *stats;
1052 struct rtnl_link_stats64 temp;
1053 struct nlattr *attr;
1054
1055 stats = dev_get_stats(dev, &temp);
1056
1057 attr = nla_reserve(skb, IFLA_STATS,
1058 sizeof(struct rtnl_link_stats));
1059 if (!attr)
1060 return -EMSGSIZE;
1061
1062 copy_rtnl_link_stats(nla_data(attr), stats);
1063
1064 attr = nla_reserve(skb, IFLA_STATS64,
1065 sizeof(struct rtnl_link_stats64));
1066 if (!attr)
1067 return -EMSGSIZE;
1068
1069 copy_rtnl_link_stats64(nla_data(attr), stats);
1070
1071 return 0;
1072}
1073
1074static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1075 struct net_device *dev,
1076 int vfs_num,
1077 struct nlattr *vfinfo)
1078{
1079 struct ifla_vf_rss_query_en vf_rss_query_en;
1080 struct ifla_vf_link_state vf_linkstate;
1081 struct ifla_vf_spoofchk vf_spoofchk;
1082 struct ifla_vf_tx_rate vf_tx_rate;
1083 struct ifla_vf_stats vf_stats;
1084 struct ifla_vf_trust vf_trust;
1085 struct ifla_vf_vlan vf_vlan;
1086 struct ifla_vf_rate vf_rate;
1087 struct nlattr *vf, *vfstats;
1088 struct ifla_vf_mac vf_mac;
1089 struct ifla_vf_info ivi;
1090
1091 /* Not all SR-IOV capable drivers support the
1092 * spoofcheck and "RSS query enable" query. Preset to
1093 * -1 so the user space tool can detect that the driver
1094 * didn't report anything.
1095 */
1096 ivi.spoofchk = -1;
1097 ivi.rss_query_en = -1;
1098 ivi.trusted = -1;
1099 memset(ivi.mac, 0, sizeof(ivi.mac));
1100 /* The default value for VF link state is "auto"
1101 * IFLA_VF_LINK_STATE_AUTO which equals zero
1102 */
1103 ivi.linkstate = 0;
1104 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1105 return 0;
1106
1107 vf_mac.vf =
1108 vf_vlan.vf =
1109 vf_rate.vf =
1110 vf_tx_rate.vf =
1111 vf_spoofchk.vf =
1112 vf_linkstate.vf =
1113 vf_rss_query_en.vf =
1114 vf_trust.vf = ivi.vf;
1115
1116 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1117 vf_vlan.vlan = ivi.vlan;
1118 vf_vlan.qos = ivi.qos;
1119 vf_tx_rate.rate = ivi.max_tx_rate;
1120 vf_rate.min_tx_rate = ivi.min_tx_rate;
1121 vf_rate.max_tx_rate = ivi.max_tx_rate;
1122 vf_spoofchk.setting = ivi.spoofchk;
1123 vf_linkstate.link_state = ivi.linkstate;
1124 vf_rss_query_en.setting = ivi.rss_query_en;
1125 vf_trust.setting = ivi.trusted;
1126 vf = nla_nest_start(skb, IFLA_VF_INFO);
1127 if (!vf) {
1128 nla_nest_cancel(skb, vfinfo);
1129 return -EMSGSIZE;
1130 }
1131 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1132 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1133 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1134 &vf_rate) ||
1135 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1136 &vf_tx_rate) ||
1137 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1138 &vf_spoofchk) ||
1139 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1140 &vf_linkstate) ||
1141 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1142 sizeof(vf_rss_query_en),
1143 &vf_rss_query_en) ||
1144 nla_put(skb, IFLA_VF_TRUST,
1145 sizeof(vf_trust), &vf_trust))
1146 return -EMSGSIZE;
1147 memset(&vf_stats, 0, sizeof(vf_stats));
1148 if (dev->netdev_ops->ndo_get_vf_stats)
1149 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1150 &vf_stats);
1151 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1152 if (!vfstats) {
1153 nla_nest_cancel(skb, vf);
1154 nla_nest_cancel(skb, vfinfo);
1155 return -EMSGSIZE;
1156 }
1157 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1158 vf_stats.rx_packets) ||
1159 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1160 vf_stats.tx_packets) ||
1161 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1162 vf_stats.rx_bytes) ||
1163 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1164 vf_stats.tx_bytes) ||
1165 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1166 vf_stats.broadcast) ||
1167 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1168 vf_stats.multicast))
1169 return -EMSGSIZE;
1170 nla_nest_end(skb, vfstats);
1171 nla_nest_end(skb, vf);
1172 return 0;
1173}
1174
1175static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1176{
1177 struct rtnl_link_ifmap map = {
1178 .mem_start = dev->mem_start,
1179 .mem_end = dev->mem_end,
1180 .base_addr = dev->base_addr,
1181 .irq = dev->irq,
1182 .dma = dev->dma,
1183 .port = dev->if_port,
1184 };
1185 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1186 return -EMSGSIZE;
1187
1188 return 0;
1189}
1190
1048static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, 1191static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1049 int type, u32 pid, u32 seq, u32 change, 1192 int type, u32 pid, u32 seq, u32 change,
1050 unsigned int flags, u32 ext_filter_mask) 1193 unsigned int flags, u32 ext_filter_mask)
1051{ 1194{
1052 struct ifinfomsg *ifm; 1195 struct ifinfomsg *ifm;
1053 struct nlmsghdr *nlh; 1196 struct nlmsghdr *nlh;
1054 struct rtnl_link_stats64 temp; 1197 struct nlattr *af_spec;
1055 const struct rtnl_link_stats64 *stats;
1056 struct nlattr *attr, *af_spec;
1057 struct rtnl_af_ops *af_ops; 1198 struct rtnl_af_ops *af_ops;
1058 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 1199 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1059 1200
@@ -1096,18 +1237,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1096 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1237 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1097 goto nla_put_failure; 1238 goto nla_put_failure;
1098 1239
1099 if (1) { 1240 if (rtnl_fill_link_ifmap(skb, dev))
1100 struct rtnl_link_ifmap map = { 1241 goto nla_put_failure;
1101 .mem_start = dev->mem_start,
1102 .mem_end = dev->mem_end,
1103 .base_addr = dev->base_addr,
1104 .irq = dev->irq,
1105 .dma = dev->dma,
1106 .port = dev->if_port,
1107 };
1108 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1109 goto nla_put_failure;
1110 }
1111 1242
1112 if (dev->addr_len) { 1243 if (dev->addr_len) {
1113 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1244 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
@@ -1124,128 +1255,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1124 if (rtnl_phys_switch_id_fill(skb, dev)) 1255 if (rtnl_phys_switch_id_fill(skb, dev))
1125 goto nla_put_failure; 1256 goto nla_put_failure;
1126 1257
1127 attr = nla_reserve(skb, IFLA_STATS, 1258 if (rtnl_fill_stats(skb, dev))
1128 sizeof(struct rtnl_link_stats));
1129 if (attr == NULL)
1130 goto nla_put_failure;
1131
1132 stats = dev_get_stats(dev, &temp);
1133 copy_rtnl_link_stats(nla_data(attr), stats);
1134
1135 attr = nla_reserve(skb, IFLA_STATS64,
1136 sizeof(struct rtnl_link_stats64));
1137 if (attr == NULL)
1138 goto nla_put_failure; 1259 goto nla_put_failure;
1139 copy_rtnl_link_stats64(nla_data(attr), stats);
1140 1260
1141 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && 1261 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
1142 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) 1262 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
1143 goto nla_put_failure; 1263 goto nla_put_failure;
1144 1264
1145 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent 1265 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
1146 && (ext_filter_mask & RTEXT_FILTER_VF)) { 1266 ext_filter_mask & RTEXT_FILTER_VF) {
1147 int i; 1267 int i;
1148 1268 struct nlattr *vfinfo;
1149 struct nlattr *vfinfo, *vf, *vfstats;
1150 int num_vfs = dev_num_vf(dev->dev.parent); 1269 int num_vfs = dev_num_vf(dev->dev.parent);
1151 1270
1152 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); 1271 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1153 if (!vfinfo) 1272 if (!vfinfo)
1154 goto nla_put_failure; 1273 goto nla_put_failure;
1155 for (i = 0; i < num_vfs; i++) { 1274 for (i = 0; i < num_vfs; i++) {
1156 struct ifla_vf_info ivi; 1275 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1157 struct ifla_vf_mac vf_mac;
1158 struct ifla_vf_vlan vf_vlan;
1159 struct ifla_vf_rate vf_rate;
1160 struct ifla_vf_tx_rate vf_tx_rate;
1161 struct ifla_vf_spoofchk vf_spoofchk;
1162 struct ifla_vf_link_state vf_linkstate;
1163 struct ifla_vf_rss_query_en vf_rss_query_en;
1164 struct ifla_vf_stats vf_stats;
1165 struct ifla_vf_trust vf_trust;
1166
1167 /*
1168 * Not all SR-IOV capable drivers support the
1169 * spoofcheck and "RSS query enable" query. Preset to
1170 * -1 so the user space tool can detect that the driver
1171 * didn't report anything.
1172 */
1173 ivi.spoofchk = -1;
1174 ivi.rss_query_en = -1;
1175 ivi.trusted = -1;
1176 memset(ivi.mac, 0, sizeof(ivi.mac));
1177 /* The default value for VF link state is "auto"
1178 * IFLA_VF_LINK_STATE_AUTO which equals zero
1179 */
1180 ivi.linkstate = 0;
1181 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
1182 break;
1183 vf_mac.vf =
1184 vf_vlan.vf =
1185 vf_rate.vf =
1186 vf_tx_rate.vf =
1187 vf_spoofchk.vf =
1188 vf_linkstate.vf =
1189 vf_rss_query_en.vf =
1190 vf_trust.vf = ivi.vf;
1191
1192 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1193 vf_vlan.vlan = ivi.vlan;
1194 vf_vlan.qos = ivi.qos;
1195 vf_tx_rate.rate = ivi.max_tx_rate;
1196 vf_rate.min_tx_rate = ivi.min_tx_rate;
1197 vf_rate.max_tx_rate = ivi.max_tx_rate;
1198 vf_spoofchk.setting = ivi.spoofchk;
1199 vf_linkstate.link_state = ivi.linkstate;
1200 vf_rss_query_en.setting = ivi.rss_query_en;
1201 vf_trust.setting = ivi.trusted;
1202 vf = nla_nest_start(skb, IFLA_VF_INFO);
1203 if (!vf) {
1204 nla_nest_cancel(skb, vfinfo);
1205 goto nla_put_failure;
1206 }
1207 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1208 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1209 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1210 &vf_rate) ||
1211 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1212 &vf_tx_rate) ||
1213 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1214 &vf_spoofchk) ||
1215 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1216 &vf_linkstate) ||
1217 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1218 sizeof(vf_rss_query_en),
1219 &vf_rss_query_en) ||
1220 nla_put(skb, IFLA_VF_TRUST,
1221 sizeof(vf_trust), &vf_trust))
1222 goto nla_put_failure; 1276 goto nla_put_failure;
1223 memset(&vf_stats, 0, sizeof(vf_stats));
1224 if (dev->netdev_ops->ndo_get_vf_stats)
1225 dev->netdev_ops->ndo_get_vf_stats(dev, i,
1226 &vf_stats);
1227 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1228 if (!vfstats) {
1229 nla_nest_cancel(skb, vf);
1230 nla_nest_cancel(skb, vfinfo);
1231 goto nla_put_failure;
1232 }
1233 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1234 vf_stats.rx_packets) ||
1235 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1236 vf_stats.tx_packets) ||
1237 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1238 vf_stats.rx_bytes) ||
1239 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1240 vf_stats.tx_bytes) ||
1241 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1242 vf_stats.broadcast) ||
1243 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1244 vf_stats.multicast))
1245 goto nla_put_failure;
1246 nla_nest_end(skb, vfstats);
1247 nla_nest_end(skb, vf);
1248 } 1277 }
1278
1249 nla_nest_end(skb, vfinfo); 1279 nla_nest_end(skb, vfinfo);
1250 } 1280 }
1251 1281
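
The large rtnetlink.c diff is mostly mechanical: the stats, per-VF and ifmap blocks move out of rtnl_fill_ifinfo() into rtnl_fill_stats(), rtnl_fill_vfinfo() and rtnl_fill_link_ifmap(). The first two are noinline_for_stack, so the rtnl_link_stats64 temporary and the pile of ifla_vf_* structs only occupy stack while the helper actually runs, instead of widening rtnl_fill_ifinfo()'s frame for every caller; note also how failure paths unwind partially built nests with nla_nest_cancel() before -EMSGSIZE propagates. The kernel defines noinline_for_stack as plain noinline; a minimal illustration of the idea (hypothetical scratch struct):

    #include <string.h>

    #define noinline_for_stack __attribute__((noinline))

    struct scratch { char big[512]; };   /* stand-in for the VF temporaries */

    static noinline_for_stack int fill_detail(char *out, size_t outlen)
    {
        struct scratch s;                /* lives only in this frame */

        memset(&s, 0, sizeof(s));
        strncpy(out, s.big, outlen);     /* pretend to format something */
        return 0;
    }

    static int fill_info(char *out, size_t outlen, int want_detail)
    {
        /* caller's frame stays small whether or not detail is needed */
        return want_detail ? fill_detail(out, outlen) : 0;
    }
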
diff --git a/net/core/scm.c b/net/core/scm.c
index 3b6899b7d810..8a1741b14302 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
305 err = put_user(cmlen, &cm->cmsg_len); 305 err = put_user(cmlen, &cm->cmsg_len);
306 if (!err) { 306 if (!err) {
307 cmlen = CMSG_SPACE(i*sizeof(int)); 307 cmlen = CMSG_SPACE(i*sizeof(int));
308 if (msg->msg_controllen < cmlen)
309 cmlen = msg->msg_controllen;
308 msg->msg_control += cmlen; 310 msg->msg_control += cmlen;
309 msg->msg_controllen -= cmlen; 311 msg->msg_controllen -= cmlen;
310 } 312 }
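
The scm_detach_fds() hunk clamps the cmsg advance to what actually remains of the control buffer. When only part of the fd array fit, CMSG_SPACE(i * sizeof(int)) could exceed msg_controllen, pushing msg_control past the user buffer and wrapping the unsigned msg_controllen. With illustrative numbers (24 is CMSG_SPACE(2 * sizeof(int)) on a typical 64-bit ABI):

    #include <stdio.h>

    int main(void)
    {
        size_t controllen = 16;     /* space left in the buffer */
        size_t cmlen = 24;          /* e.g. CMSG_SPACE(2 * sizeof(int)) */

        if (controllen < cmlen)     /* the added clamp */
            cmlen = controllen;
        controllen -= cmlen;

        /* prints 0; without the clamp it would be 2^64 - 8 */
        printf("%zu\n", controllen);
        return 0;
    }
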
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index aa41e6dd6429..152b9c70e252 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4268,7 +4268,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4268 return NULL; 4268 return NULL;
4269 } 4269 }
4270 4270
4271 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); 4271 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len,
4272 2 * ETH_ALEN);
4272 skb->mac_header += VLAN_HLEN; 4273 skb->mac_header += VLAN_HLEN;
4273 return skb; 4274 return skb;
4274} 4275}
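
skb_reorder_vlan_header() used a hardcoded source offset of VLAN_ETH_HLEN (18) bytes before the payload, which locates the MAC addresses only when exactly one VLAN tag is present; skb->mac_len reflects the actual header length, so frames carrying additional tags now move the right 12 bytes. A quick offset check, assuming the usual 14-byte Ethernet and 4-byte VLAN header sizes:

    #include <assert.h>

    #define ETH_ALEN      6
    #define ETH_HLEN      14
    #define VLAN_HLEN     4
    #define VLAN_ETH_HLEN (ETH_HLEN + VLAN_HLEN)

    int main(void)
    {
        unsigned tags = 2;                              /* QinQ frame */
        unsigned mac_len = ETH_HLEN + tags * VLAN_HLEN; /* 22 */

        /* the addresses start mac_len bytes before the payload ... */
        assert(mac_len == 22);
        /* ... so the old constant pointed 4 bytes into the wrong data */
        assert(VLAN_ETH_HLEN != mac_len);
        return 0;
    }
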
diff --git a/net/core/sock.c b/net/core/sock.c
index 1e4dd54bfb5a..e31dfcee1729 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1530,7 +1530,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1530 skb_queue_head_init(&newsk->sk_receive_queue); 1530 skb_queue_head_init(&newsk->sk_receive_queue);
1531 skb_queue_head_init(&newsk->sk_write_queue); 1531 skb_queue_head_init(&newsk->sk_write_queue);
1532 1532
1533 spin_lock_init(&newsk->sk_dst_lock);
1534 rwlock_init(&newsk->sk_callback_lock); 1533 rwlock_init(&newsk->sk_callback_lock);
1535 lockdep_set_class_and_name(&newsk->sk_callback_lock, 1534 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1536 af_callback_keys + newsk->sk_family, 1535 af_callback_keys + newsk->sk_family,
@@ -1607,7 +1606,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1607{ 1606{
1608 u32 max_segs = 1; 1607 u32 max_segs = 1;
1609 1608
1610 __sk_dst_set(sk, dst); 1609 sk_dst_set(sk, dst);
1611 sk->sk_route_caps = dst->dev->features; 1610 sk->sk_route_caps = dst->dev->features;
1612 if (sk->sk_route_caps & NETIF_F_GSO) 1611 if (sk->sk_route_caps & NETIF_F_GSO)
1613 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 1612 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -1815,7 +1814,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
1815{ 1814{
1816 DEFINE_WAIT(wait); 1815 DEFINE_WAIT(wait);
1817 1816
1818 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1817 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1819 for (;;) { 1818 for (;;) {
1820 if (!timeo) 1819 if (!timeo)
1821 break; 1820 break;
@@ -1861,7 +1860,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1861 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) 1860 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1862 break; 1861 break;
1863 1862
1864 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1863 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1865 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1864 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1866 err = -EAGAIN; 1865 err = -EAGAIN;
1867 if (!timeo) 1866 if (!timeo)
@@ -2048,9 +2047,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2048 DEFINE_WAIT(wait); 2047 DEFINE_WAIT(wait);
2049 2048
2050 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2049 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2051 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2050 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2052 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); 2051 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
2053 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2052 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2054 finish_wait(sk_sleep(sk), &wait); 2053 finish_wait(sk_sleep(sk), &wait);
2055 return rc; 2054 return rc;
2056} 2055}
@@ -2388,7 +2387,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2388 } else 2387 } else
2389 sk->sk_wq = NULL; 2388 sk->sk_wq = NULL;
2390 2389
2391 spin_lock_init(&sk->sk_dst_lock);
2392 rwlock_init(&sk->sk_callback_lock); 2390 rwlock_init(&sk->sk_callback_lock);
2393 lockdep_set_class_and_name(&sk->sk_callback_lock, 2391 lockdep_set_class_and_name(&sk->sk_callback_lock,
2394 af_callback_keys + sk->sk_family, 2392 af_callback_keys + sk->sk_family,
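
Both sk_dst_lock initializations vanish because this series drops the spinlock altogether: the cached route is swapped atomically and readers are RCU-protected, which is also why sk_setup_caps() moves from __sk_dst_set() (the owner-only variant) to sk_dst_set(). A userspace model of the lock-free publish-and-release pattern, with illustrative types rather than the kernel's dst_entry:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct dst { atomic_int refcnt; };

    static _Atomic(struct dst *) dst_cache;

    static void dst_release(struct dst *d)
    {
        if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
            free(d);    /* the real code defers freeing via RCU */
    }

    /* publish the new entry, drop the old one; no lock needed */
    static void dst_set(struct dst *d)
    {
        struct dst *old = atomic_exchange(&dst_cache, d);

        dst_release(old);
    }
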
diff --git a/net/core/stream.c b/net/core/stream.c
index d70f77a0c889..b96f7a79e544 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk)
39 wake_up_interruptible_poll(&wq->wait, POLLOUT | 39 wake_up_interruptible_poll(&wq->wait, POLLOUT |
40 POLLWRNORM | POLLWRBAND); 40 POLLWRNORM | POLLWRBAND);
41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
42 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); 42 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
43 rcu_read_unlock(); 43 rcu_read_unlock();
44 } 44 }
45} 45}
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
126 current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; 126 current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
127 127
128 while (1) { 128 while (1) {
129 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 129 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
130 130
131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
132 132
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
139 } 139 }
140 if (signal_pending(current)) 140 if (signal_pending(current))
141 goto do_interrupted; 141 goto do_interrupted;
142 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 142 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
143 if (sk_stream_memory_free(sk) && !vm_wait) 143 if (sk_stream_memory_free(sk) && !vm_wait)
144 break; 144 break;
145 145
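
The repeated set_bit(SOCK_ASYNC_*, &sk->sk_socket->flags) to sk_set_bit(SOCKWQ_ASYNC_*, sk) conversions, and sock_wake_async() now taking the struct socket_wq in sk_stream_write_space(), come from relocating the async flags off struct socket onto the RCU-managed wait-queue object, so wakeup paths stop dereferencing sk->sk_socket, which a concurrent close can clear underneath them. A hedged reconstruction of the new helpers, quoted from memory of this kernel's include/net/sock.h (treat sk_wq_raw and the exact shape as assumptions):

    /* assumed shape: the wait-queue object carries the SOCKWQ_ASYNC_*
     * flags and remains valid for the socket's whole lifetime */
    static inline void sk_set_bit(int nr, struct sock *sk)
    {
        set_bit(nr, &sk->sk_wq_raw->flags);
    }

    static inline void sk_clear_bit(int nr, struct sock *sk)
    {
        clear_bit(nr, &sk->sk_wq_raw->flags);
    }
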
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index db5fc2440a23..9c6d0508e63a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
202 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 202 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
203 203
204 204
205 final_p = fl6_update_dst(&fl6, np->opt, &final); 205 rcu_read_lock();
206 final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
207 rcu_read_unlock();
206 208
207 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 209 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
208 if (IS_ERR(dst)) { 210 if (IS_ERR(dst)) {
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
219 &ireq->ir_v6_loc_addr, 221 &ireq->ir_v6_loc_addr,
220 &ireq->ir_v6_rmt_addr); 222 &ireq->ir_v6_rmt_addr);
221 fl6.daddr = ireq->ir_v6_rmt_addr; 223 fl6.daddr = ireq->ir_v6_rmt_addr;
222 err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 224 rcu_read_lock();
225 err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
226 np->tclass);
227 rcu_read_unlock();
223 err = net_xmit_eval(err); 228 err = net_xmit_eval(err);
224 } 229 }
225 230
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
387 struct inet_request_sock *ireq = inet_rsk(req); 392 struct inet_request_sock *ireq = inet_rsk(req);
388 struct ipv6_pinfo *newnp; 393 struct ipv6_pinfo *newnp;
389 const struct ipv6_pinfo *np = inet6_sk(sk); 394 const struct ipv6_pinfo *np = inet6_sk(sk);
395 struct ipv6_txoptions *opt;
390 struct inet_sock *newinet; 396 struct inet_sock *newinet;
391 struct dccp6_sock *newdp6; 397 struct dccp6_sock *newdp6;
392 struct sock *newsk; 398 struct sock *newsk;
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
453 * comment in that function for the gory details. -acme 459 * comment in that function for the gory details. -acme
454 */ 460 */
455 461
456 __ip6_dst_store(newsk, dst, NULL, NULL); 462 ip6_dst_store(newsk, dst, NULL, NULL);
457 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | 463 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
458 NETIF_F_TSO); 464 NETIF_F_TSO);
459 newdp6 = (struct dccp6_sock *)newsk; 465 newdp6 = (struct dccp6_sock *)newsk;
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
488 * Yes, keeping reference count would be much more clever, but we make 494 * Yes, keeping reference count would be much more clever, but we make
489 * one more thing there: reattach optmem to newsk. 495 * one more thing there: reattach optmem to newsk.
490 */ 496 */
491 if (np->opt != NULL) 497 opt = rcu_dereference(np->opt);
492 newnp->opt = ipv6_dup_options(newsk, np->opt); 498 if (opt) {
493 499 opt = ipv6_dup_options(newsk, opt);
500 RCU_INIT_POINTER(newnp->opt, opt);
501 }
494 inet_csk(newsk)->icsk_ext_hdr_len = 0; 502 inet_csk(newsk)->icsk_ext_hdr_len = 0;
495 if (newnp->opt != NULL) 503 if (opt)
496 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + 504 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
497 newnp->opt->opt_flen); 505 opt->opt_flen;
498 506
499 dccp_sync_mss(newsk, dst_mtu(dst)); 507 dccp_sync_mss(newsk, dst_mtu(dst));
500 508
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
757 struct ipv6_pinfo *np = inet6_sk(sk); 765 struct ipv6_pinfo *np = inet6_sk(sk);
758 struct dccp_sock *dp = dccp_sk(sk); 766 struct dccp_sock *dp = dccp_sk(sk);
759 struct in6_addr *saddr = NULL, *final_p, final; 767 struct in6_addr *saddr = NULL, *final_p, final;
768 struct ipv6_txoptions *opt;
760 struct flowi6 fl6; 769 struct flowi6 fl6;
761 struct dst_entry *dst; 770 struct dst_entry *dst;
762 int addr_type; 771 int addr_type;
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
856 fl6.fl6_sport = inet->inet_sport; 865 fl6.fl6_sport = inet->inet_sport;
857 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 866 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
858 867
859 final_p = fl6_update_dst(&fl6, np->opt, &final); 868 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
869 final_p = fl6_update_dst(&fl6, opt, &final);
860 870
861 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 871 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
862 if (IS_ERR(dst)) { 872 if (IS_ERR(dst)) {
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
873 np->saddr = *saddr; 883 np->saddr = *saddr;
874 inet->inet_rcv_saddr = LOOPBACK4_IPV6; 884 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
875 885
876 __ip6_dst_store(sk, dst, NULL, NULL); 886 ip6_dst_store(sk, dst, NULL, NULL);
877 887
878 icsk->icsk_ext_hdr_len = 0; 888 icsk->icsk_ext_hdr_len = 0;
879 if (np->opt != NULL) 889 if (opt)
880 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 890 icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
881 np->opt->opt_nflen);
882 891
883 inet->inet_dport = usin->sin6_port; 892 inet->inet_dport = usin->sin6_port;
884 893
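
From here on, every np->opt access gains RCU discipline: unlocked readers use rcu_dereference() inside rcu_read_lock(), paths holding the socket lock use rcu_dereference_protected() with sock_owned_by_user() as the justification, and (later in this diff) ipv6_dup_options()/ipv6_renew_options() initialize a refcount on the option block while inet6_destroy_sock() drops it through txopt_put(). A hedged sketch of the get/put pair this conversion relies on; the shape is inferred from the call sites, so check include/net/ipv6.h before trusting the details:

    static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
    {
        struct ipv6_txoptions *opt;

        rcu_read_lock();
        opt = rcu_dereference(np->opt);
        if (opt && !atomic_inc_not_zero(&opt->refcnt))
            opt = NULL;     /* lost the race against the final put */
        rcu_read_unlock();
        return opt;
    }

    static inline void txopt_put(struct ipv6_txoptions *opt)
    {
        if (opt && atomic_dec_and_test(&opt->refcnt))
            kfree_rcu(opt, rcu);
    }
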
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b5cf13a28009..41e65804ddf5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
339 if (sk_stream_is_writeable(sk)) { 339 if (sk_stream_is_writeable(sk)) {
340 mask |= POLLOUT | POLLWRNORM; 340 mask |= POLLOUT | POLLWRNORM;
341 } else { /* send SIGIO later */ 341 } else { /* send SIGIO later */
342 set_bit(SOCK_ASYNC_NOSPACE, 342 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
343 &sk->sk_socket->flags);
344 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 343 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
345 344
346 /* Race breaker. If space is freed after 345 /* Race breaker. If space is freed after
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 675cf94e04f8..eebf5ac8ce18 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1747,9 +1747,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1747 } 1747 }
1748 1748
1749 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1749 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1750 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1750 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); 1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1752 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1753 finish_wait(sk_sleep(sk), &wait); 1753 finish_wait(sk_sleep(sk), &wait);
1754 } 1754 }
1755 1755
@@ -2004,10 +2004,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
2004 } 2004 }
2005 2005
2006 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2006 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2007 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2007 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2008 sk_wait_event(sk, &timeo, 2008 sk_wait_event(sk, &timeo,
2009 !dn_queue_too_long(scp, queue, flags)); 2009 !dn_queue_too_long(scp, queue, flags));
2010 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2010 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2011 finish_wait(sk_sleep(sk), &wait); 2011 finish_wait(sk_sleep(sk), &wait);
2012 continue; 2012 continue;
2013 } 2013 }
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 4677b6fa6dda..ecc28cff08ab 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -67,7 +67,7 @@
67 * Returns the size of the result on success, -ve error code otherwise. 67 * Returns the size of the result on success, -ve error code otherwise.
68 */ 68 */
69int dns_query(const char *type, const char *name, size_t namelen, 69int dns_query(const char *type, const char *name, size_t namelen,
70 const char *options, char **_result, time_t *_expiry) 70 const char *options, char **_result, time64_t *_expiry)
71{ 71{
72 struct key *rkey; 72 struct key *rkey;
73 const struct user_key_payload *upayload; 73 const struct user_key_payload *upayload;
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 35a9788bb3ae..c7d1adca30d8 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
312 return; 312 return;
313 313
314out: 314out:
315 WARN_ON_ONCE("HSR: Could not send supervision frame\n"); 315 WARN_ONCE(1, "HSR: Could not send supervision frame\n");
316 kfree_skb(skb); 316 kfree_skb(skb);
317} 317}
318 318
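
The HSR change fixes macro misuse rather than behavior: WARN_ON_ONCE(cond) takes only a condition and prints no custom text, so passing a string literal merely supplied an always-true pointer and the message was silently discarded. WARN_ONCE(cond, fmt, ...) is the printing form:

    /* Both macros fire at most once; the difference is what the first
     * argument means:
     *
     *     WARN_ON_ONCE(cond);          warn if cond != 0, no message
     *     WARN_ONCE(cond, fmt, ...);   warn if cond != 0, prints fmt
     */
    WARN_ON_ONCE("HSR: ...\n");     /* old: pointer is truthy, text lost */
    WARN_ONCE(1, "HSR: Could not send supervision frame\n"); /* new: prints */
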
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6baf36e11808..05e4cba14162 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
2126 ASSERT_RTNL(); 2126 ASSERT_RTNL();
2127 2127
2128 in_dev = ip_mc_find_dev(net, imr); 2128 in_dev = ip_mc_find_dev(net, imr);
2129 if (!in_dev) { 2129 if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
2130 ret = -ENODEV; 2130 ret = -ENODEV;
2131 goto out; 2131 goto out;
2132 } 2132 }
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
2147 2147
2148 *imlp = iml->next_rcu; 2148 *imlp = iml->next_rcu;
2149 2149
2150 ip_mc_dec_group(in_dev, group); 2150 if (in_dev)
2151 ip_mc_dec_group(in_dev, group);
2151 2152
2152 /* decrease mem now to avoid the memleak warning */ 2153 /* decrease mem now to avoid the memleak warning */
2153 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2154 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f23de8..46b9c887bede 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
563 int max_retries, thresh; 563 int max_retries, thresh;
564 u8 defer_accept; 564 u8 defer_accept;
565 565
566 if (sk_listener->sk_state != TCP_LISTEN) 566 if (sk_state_load(sk_listener) != TCP_LISTEN)
567 goto drop; 567 goto drop;
568 568
569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; 569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
749 * It is OK, because this socket enters to hash table only 749 * It is OK, because this socket enters to hash table only
750 * after validation is complete. 750 * after validation is complete.
751 */ 751 */
752 sk->sk_state = TCP_LISTEN; 752 sk_state_store(sk, TCP_LISTEN);
753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) { 753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
754 inet->inet_sport = htons(inet->inet_num); 754 inet->inet_sport = htons(inet->inet_num);
755 755
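
sk_state_load() and sk_state_store() set the theme for the TCP hunks that follow: sk->sk_state is now read without the socket lock (the request-sock timer here, and poll, /proc and diag below), so the accesses need ordered single-copy semantics rather than plain loads and stores. A hedged reconstruction of the pair; the names strongly suggest acquire/release pairing, but treat the exact definition as an assumption:

    static inline int sk_state_load(const struct sock *sk)
    {
        /* paired with sk_state_store(): a reader that sees the new
         * state also sees everything written before the store */
        return smp_load_acquire(&sk->sk_state);
    }

    static inline void sk_state_store(struct sock *sk, int newstate)
    {
        smp_store_release(&sk->sk_state, newstate);
    }
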
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 92dd4b74d513..c3a38353f5dc 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
134 struct mfc_cache *c, struct rtmsg *rtm); 134 struct mfc_cache *c, struct rtmsg *rtm);
135static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, 135static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
136 int cmd); 136 int cmd);
137static void mroute_clean_tables(struct mr_table *mrt); 137static void mroute_clean_tables(struct mr_table *mrt, bool all);
138static void ipmr_expire_process(unsigned long arg); 138static void ipmr_expire_process(unsigned long arg);
139 139
140#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 140#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
350static void ipmr_free_table(struct mr_table *mrt) 350static void ipmr_free_table(struct mr_table *mrt)
351{ 351{
352 del_timer_sync(&mrt->ipmr_expire_timer); 352 del_timer_sync(&mrt->ipmr_expire_timer);
353 mroute_clean_tables(mrt); 353 mroute_clean_tables(mrt, true);
354 kfree(mrt); 354 kfree(mrt);
355} 355}
356 356
@@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
441 return dev; 441 return dev;
442 442
443failure: 443failure:
444 /* allow the register to be completed before unregistering. */
445 rtnl_unlock();
446 rtnl_lock();
447
448 unregister_netdevice(dev); 444 unregister_netdevice(dev);
449 return NULL; 445 return NULL;
450} 446}
@@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
540 return dev; 536 return dev;
541 537
542failure: 538failure:
543 /* allow the register to be completed before unregistering. */
544 rtnl_unlock();
545 rtnl_lock();
546
547 unregister_netdevice(dev); 539 unregister_netdevice(dev);
548 return NULL; 540 return NULL;
549} 541}
@@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1208 * Close the multicast socket, and clear the vif tables etc 1200 * Close the multicast socket, and clear the vif tables etc
1209 */ 1201 */
1210 1202
1211static void mroute_clean_tables(struct mr_table *mrt) 1203static void mroute_clean_tables(struct mr_table *mrt, bool all)
1212{ 1204{
1213 int i; 1205 int i;
1214 LIST_HEAD(list); 1206 LIST_HEAD(list);
@@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
1217 /* Shut down all active vif entries */ 1209 /* Shut down all active vif entries */
1218 1210
1219 for (i = 0; i < mrt->maxvif; i++) { 1211 for (i = 0; i < mrt->maxvif; i++) {
1220 if (!(mrt->vif_table[i].flags & VIFF_STATIC)) 1212 if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
1221 vif_delete(mrt, i, 0, &list); 1213 continue;
1214 vif_delete(mrt, i, 0, &list);
1222 } 1215 }
1223 unregister_netdevice_many(&list); 1216 unregister_netdevice_many(&list);
1224 1217
@@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1226 1219
1227 for (i = 0; i < MFC_LINES; i++) { 1220 for (i = 0; i < MFC_LINES; i++) {
1228 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1221 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
1229 if (c->mfc_flags & MFC_STATIC) 1222 if (!all && (c->mfc_flags & MFC_STATIC))
1230 continue; 1223 continue;
1231 list_del_rcu(&c->list); 1224 list_del_rcu(&c->list);
1232 mroute_netlink_event(mrt, c, RTM_DELROUTE); 1225 mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk)
1261 NETCONFA_IFINDEX_ALL, 1254 NETCONFA_IFINDEX_ALL,
1262 net->ipv4.devconf_all); 1255 net->ipv4.devconf_all);
1263 RCU_INIT_POINTER(mrt->mroute_sk, NULL); 1256 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1264 mroute_clean_tables(mrt); 1257 mroute_clean_tables(mrt, false);
1265 } 1258 }
1266 } 1259 }
1267 rtnl_unlock(); 1260 rtnl_unlock();
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 657d2307f031..b3ca21b2ba9b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
45 struct net *net = nf_ct_net(ct); 45 struct net *net = nf_ct_net(ct);
46 const struct nf_conn *master = ct->master; 46 const struct nf_conn *master = ct->master;
47 struct nf_conntrack_expect *other_exp; 47 struct nf_conntrack_expect *other_exp;
48 struct nf_conntrack_tuple t; 48 struct nf_conntrack_tuple t = {};
49 const struct nf_ct_pptp_master *ct_pptp_info; 49 const struct nf_ct_pptp_master *ct_pptp_info;
50 const struct nf_nat_pptp *nat_pptp_info; 50 const struct nf_nat_pptp *nat_pptp_info;
51 struct nf_nat_range range; 51 struct nf_nat_range range;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8c0d0bdc2a7c..63e5be0abd86 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -406,10 +406,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
406 ip_select_ident(net, skb, NULL); 406 ip_select_ident(net, skb, NULL);
407 407
408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
409 skb->transport_header += iphlen;
410 if (iph->protocol == IPPROTO_ICMP &&
411 length >= iphlen + sizeof(struct icmphdr))
412 icmp_out_count(net, ((struct icmphdr *)
413 skb_transport_header(skb))->type);
409 } 414 }
410 if (iph->protocol == IPPROTO_ICMP)
411 icmp_out_count(net, ((struct icmphdr *)
412 skb_transport_header(skb))->type);
413 415
414 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 416 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
415 net, sk, skb, NULL, rt->dst.dev, 417 net, sk, skb, NULL, rt->dst.dev,
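
The raw_send_hdrinc() fix closes an out-of-bounds read: for IPPROTO_ICMP sockets the SNMP counter peeked at the ICMP type byte through skb_transport_header() even when the caller supplied fewer bytes than a full ICMP header. The patch points the transport header just past the IP header and bounds the read with `length >= iphlen + sizeof(struct icmphdr)`. The guard in isolation (a hypothetical model struct stands in for struct icmphdr):

    #include <stddef.h>

    struct icmphdr_model { unsigned char type, code; unsigned short csum; };

    /* only inspect the ICMP type when the packet actually contains a
     * complete ICMP header after the IP header; a 1-byte payload used
     * to trigger a read past the buffer */
    static int icmp_type_or_neg(const unsigned char *pkt, size_t len,
                                size_t iphlen)
    {
        if (len >= iphlen + sizeof(struct icmphdr_model))
            return pkt[iphlen];     /* type is the first byte */
        return -1;
    }
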
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cfa7c0c1e80..c82cca18c90f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
451 unsigned int mask; 451 unsigned int mask;
452 struct sock *sk = sock->sk; 452 struct sock *sk = sock->sk;
453 const struct tcp_sock *tp = tcp_sk(sk); 453 const struct tcp_sock *tp = tcp_sk(sk);
454 int state;
454 455
455 sock_rps_record_flow(sk); 456 sock_rps_record_flow(sk);
456 457
457 sock_poll_wait(file, sk_sleep(sk), wait); 458 sock_poll_wait(file, sk_sleep(sk), wait);
458 if (sk->sk_state == TCP_LISTEN) 459
460 state = sk_state_load(sk);
461 if (state == TCP_LISTEN)
459 return inet_csk_listen_poll(sk); 462 return inet_csk_listen_poll(sk);
460 463
461 /* Socket is not locked. We are protected from async events 464 /* Socket is not locked. We are protected from async events
@@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
492 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 495 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
493 * blocking on fresh not-connected or disconnected socket. --ANK 496 * blocking on fresh not-connected or disconnected socket. --ANK
494 */ 497 */
495 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 498 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
496 mask |= POLLHUP; 499 mask |= POLLHUP;
497 if (sk->sk_shutdown & RCV_SHUTDOWN) 500 if (sk->sk_shutdown & RCV_SHUTDOWN)
498 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 501 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
499 502
500 /* Connected or passive Fast Open socket? */ 503 /* Connected or passive Fast Open socket? */
501 if (sk->sk_state != TCP_SYN_SENT && 504 if (state != TCP_SYN_SENT &&
502 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { 505 (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
503 int target = sock_rcvlowat(sk, 0, INT_MAX); 506 int target = sock_rcvlowat(sk, 0, INT_MAX);
504 507
505 if (tp->urg_seq == tp->copied_seq && 508 if (tp->urg_seq == tp->copied_seq &&
@@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
507 tp->urg_data) 510 tp->urg_data)
508 target++; 511 target++;
509 512
510 /* Potential race condition. If read of tp below will
511 * escape above sk->sk_state, we can be illegally awaken
512 * in SYN_* states. */
513 if (tp->rcv_nxt - tp->copied_seq >= target) 513 if (tp->rcv_nxt - tp->copied_seq >= target)
514 mask |= POLLIN | POLLRDNORM; 514 mask |= POLLIN | POLLRDNORM;
515 515
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
517 if (sk_stream_is_writeable(sk)) { 517 if (sk_stream_is_writeable(sk)) {
518 mask |= POLLOUT | POLLWRNORM; 518 mask |= POLLOUT | POLLWRNORM;
519 } else { /* send SIGIO later */ 519 } else { /* send SIGIO later */
520 set_bit(SOCK_ASYNC_NOSPACE, 520 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
521 &sk->sk_socket->flags);
522 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 521 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
523 522
524 /* Race breaker. If space is freed after 523 /* Race breaker. If space is freed after
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
906 goto out_err; 905 goto out_err;
907 } 906 }
908 907
909 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 908 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
910 909
911 mss_now = tcp_send_mss(sk, &size_goal, flags); 910 mss_now = tcp_send_mss(sk, &size_goal, flags);
912 copied = 0; 911 copied = 0;
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1134 } 1133 }
1135 1134
1136 /* This should be in poll */ 1135 /* This should be in poll */
1137 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1136 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1138 1137
1139 mss_now = tcp_send_mss(sk, &size_goal, flags); 1138 mss_now = tcp_send_mss(sk, &size_goal, flags);
1140 1139
@@ -1934,7 +1933,7 @@ void tcp_set_state(struct sock *sk, int state)
1934 /* Change state AFTER socket is unhashed to avoid closed 1933 /* Change state AFTER socket is unhashed to avoid closed
1935 * socket sitting in hash tables. 1934 * socket sitting in hash tables.
1936 */ 1935 */
1937 sk->sk_state = state; 1936 sk_state_store(sk, state);
1938 1937
1939#ifdef STATE_TRACE 1938#ifdef STATE_TRACE
1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 1939 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2644,7 +2643,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2644 if (sk->sk_type != SOCK_STREAM) 2643 if (sk->sk_type != SOCK_STREAM)
2645 return; 2644 return;
2646 2645
2647 info->tcpi_state = sk->sk_state; 2646 info->tcpi_state = sk_state_load(sk);
2647
2648 info->tcpi_ca_state = icsk->icsk_ca_state; 2648 info->tcpi_ca_state = icsk->icsk_ca_state;
2649 info->tcpi_retransmits = icsk->icsk_retransmits; 2649 info->tcpi_retransmits = icsk->icsk_retransmits;
2650 info->tcpi_probes = icsk->icsk_probes_out; 2650 info->tcpi_probes = icsk->icsk_probes_out;
@@ -2672,7 +2672,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2672 info->tcpi_snd_mss = tp->mss_cache; 2672 info->tcpi_snd_mss = tp->mss_cache;
2673 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2673 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2674 2674
2675 if (sk->sk_state == TCP_LISTEN) { 2675 if (info->tcpi_state == TCP_LISTEN) {
2676 info->tcpi_unacked = sk->sk_ack_backlog; 2676 info->tcpi_unacked = sk->sk_ack_backlog;
2677 info->tcpi_sacked = sk->sk_max_ack_backlog; 2677 info->tcpi_sacked = sk->sk_max_ack_backlog;
2678 } else { 2678 } else {
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 479f34946177..b31604086edd 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
21{ 21{
22 struct tcp_info *info = _info; 22 struct tcp_info *info = _info;
23 23
24 if (sk->sk_state == TCP_LISTEN) { 24 if (sk_state_load(sk) == TCP_LISTEN) {
25 r->idiag_rqueue = sk->sk_ack_backlog; 25 r->idiag_rqueue = sk->sk_ack_backlog;
26 r->idiag_wqueue = sk->sk_max_ack_backlog; 26 r->idiag_wqueue = sk->sk_max_ack_backlog;
27 } else if (sk->sk_type == SOCK_STREAM) { 27 } else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fdd88c3803a6..2d656eef7f8e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
4481int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4481int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4482{ 4482{
4483 struct sk_buff *skb; 4483 struct sk_buff *skb;
4484 int err = -ENOMEM;
4485 int data_len = 0;
4484 bool fragstolen; 4486 bool fragstolen;
4485 4487
4486 if (size == 0) 4488 if (size == 0)
4487 return 0; 4489 return 0;
4488 4490
4489 skb = alloc_skb(size, sk->sk_allocation); 4491 if (size > PAGE_SIZE) {
4492 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
4493
4494 data_len = npages << PAGE_SHIFT;
4495 size = data_len + (size & ~PAGE_MASK);
4496 }
4497 skb = alloc_skb_with_frags(size - data_len, data_len,
4498 PAGE_ALLOC_COSTLY_ORDER,
4499 &err, sk->sk_allocation);
4490 if (!skb) 4500 if (!skb)
4491 goto err; 4501 goto err;
4492 4502
4503 skb_put(skb, size - data_len);
4504 skb->data_len = data_len;
4505 skb->len = size;
4506
4493 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4507 if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
4494 goto err_free; 4508 goto err_free;
4495 4509
4496 if (memcpy_from_msg(skb_put(skb, size), msg, size)) 4510 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
4511 if (err)
4497 goto err_free; 4512 goto err_free;
4498 4513
4499 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 4514 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4509err_free: 4524err_free:
4510 kfree_skb(skb); 4525 kfree_skb(skb);
4511err: 4526err:
4512 return -ENOMEM; 4527 return err;
4528
4513} 4529}
4514 4530
4515static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4531static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
@@ -5667,6 +5683,7 @@ discard:
5667 } 5683 }
5668 5684
5669 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5685 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
5686 tp->copied_seq = tp->rcv_nxt;
5670 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5687 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
5671 5688
5672 /* RFC1323: The window in SYN & SYN/ACK segments is 5689 /* RFC1323: The window in SYN & SYN/ACK segments is
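
The tcp_send_rcvq() rewrite removes the single-kmalloc limit on TCP-repair injects into the receive queue: anything beyond the first page is carved into page fragments via alloc_skb_with_frags(), capped at MAX_SKB_FRAGS pages, and the copy switches to skb_copy_datagram_from_iter(), which understands paged skbs; real allocation or copy errors now propagate instead of a blanket -ENOMEM. The split arithmetic, worked under the assumption of 4 KiB pages and MAX_SKB_FRAGS = 17:

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define MAX_SKB_FRAGS 17UL

    int main(void)
    {
        size_t size = 70000, data_len = 0;   /* user write of ~68 KiB */

        if (size > PAGE_SIZE) {
            size_t npages = size >> PAGE_SHIFT;      /* 17 */

            if (npages > MAX_SKB_FRAGS)
                npages = MAX_SKB_FRAGS;
            data_len = npages << PAGE_SHIFT;         /* 69632 in frags */
            size = data_len + (size & ~PAGE_MASK);   /* tail stays linear */
        }
        printf("linear=%zu frag_bytes=%zu\n", size - data_len, data_len);
        return 0;   /* prints linear=368 frag_bytes=69632 */
    }
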
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 950e28c0cdf2..db003438aaf5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
921 } 921 }
922 922
923 md5sig = rcu_dereference_protected(tp->md5sig_info, 923 md5sig = rcu_dereference_protected(tp->md5sig_info,
924 sock_owned_by_user(sk)); 924 sock_owned_by_user(sk) ||
925 lockdep_is_held(&sk->sk_lock.slock));
925 if (!md5sig) { 926 if (!md5sig) {
926 md5sig = kmalloc(sizeof(*md5sig), gfp); 927 md5sig = kmalloc(sizeof(*md5sig), gfp);
927 if (!md5sig) 928 if (!md5sig)
@@ -2158,6 +2159,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2158 __u16 destp = ntohs(inet->inet_dport); 2159 __u16 destp = ntohs(inet->inet_dport);
2159 __u16 srcp = ntohs(inet->inet_sport); 2160 __u16 srcp = ntohs(inet->inet_sport);
2160 int rx_queue; 2161 int rx_queue;
2162 int state;
2161 2163
2162 if (icsk->icsk_pending == ICSK_TIME_RETRANS || 2164 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2163 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 2165 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
@@ -2175,17 +2177,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2175 timer_expires = jiffies; 2177 timer_expires = jiffies;
2176 } 2178 }
2177 2179
2178 if (sk->sk_state == TCP_LISTEN) 2180 state = sk_state_load(sk);
2181 if (state == TCP_LISTEN)
2179 rx_queue = sk->sk_ack_backlog; 2182 rx_queue = sk->sk_ack_backlog;
2180 else 2183 else
2181 /* 2184 /* Because we don't lock the socket,
2182 * because we dont lock socket, we might find a transient negative value 2185 * we might find a transient negative value.
2183 */ 2186 */
2184 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); 2187 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2185 2188
2186 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2189 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2187 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", 2190 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2188 i, src, srcp, dest, destp, sk->sk_state, 2191 i, src, srcp, dest, destp, state,
2189 tp->write_seq - tp->snd_una, 2192 tp->write_seq - tp->snd_una,
2190 rx_queue, 2193 rx_queue,
2191 timer_active, 2194 timer_active,
@@ -2199,8 +2202,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2199 jiffies_to_clock_t(icsk->icsk_ack.ato), 2202 jiffies_to_clock_t(icsk->icsk_ack.ato),
2200 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2203 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2201 tp->snd_cwnd, 2204 tp->snd_cwnd,
2202 sk->sk_state == TCP_LISTEN ? 2205 state == TCP_LISTEN ?
2203 (fastopenq ? fastopenq->max_qlen : 0) : 2206 fastopenq->max_qlen :
2204 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); 2207 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2205} 2208}
2206 2209
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c9c716a483e4..193ba1fa8a9a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk)
168 dst_negative_advice(sk); 168 dst_negative_advice(sk);
169 if (tp->syn_fastopen || tp->syn_data) 169 if (tp->syn_fastopen || tp->syn_data)
170 tcp_fastopen_cache_set(sk, 0, NULL, true, 0); 170 tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
171 if (tp->syn_data) 171 if (tp->syn_data && icsk->icsk_retransmits == 1)
172 NET_INC_STATS_BH(sock_net(sk), 172 NET_INC_STATS_BH(sock_net(sk),
173 LINUX_MIB_TCPFASTOPENACTIVEFAIL); 173 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
174 } 174 }
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
176 syn_set = true; 176 syn_set = true;
177 } else { 177 } else {
178 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { 178 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
179 /* Some middle-boxes may black-hole Fast Open _after_
180 * the handshake. Therefore we conservatively disable
181 * Fast Open on this path on recurring timeouts with
182 * few or zero bytes acked after Fast Open.
183 */
184 if (tp->syn_data_acked &&
185 tp->bytes_acked <= tp->rx_opt.mss_clamp) {
186 tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
187 if (icsk->icsk_retransmits == sysctl_tcp_retries1)
188 NET_INC_STATS_BH(sock_net(sk),
189 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
190 }
179 /* Black hole detection */ 191 /* Black hole detection */
180 tcp_mtu_probing(icsk, sk); 192 tcp_mtu_probing(icsk, sk);
181 193
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 24ec14f9825c..0c7b0e61b917 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -100,7 +100,6 @@
100#include <linux/slab.h> 100#include <linux/slab.h>
101#include <net/tcp_states.h> 101#include <net/tcp_states.h>
102#include <linux/skbuff.h> 102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
104#include <linux/proc_fs.h> 103#include <linux/proc_fs.h>
105#include <linux/seq_file.h> 104#include <linux/seq_file.h>
106#include <net/net_namespace.h> 105#include <net/net_namespace.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d84742f003a9..61f26851655c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3642,7 +3642,7 @@ static void addrconf_dad_work(struct work_struct *w)
3642 3642
3643 /* send a neighbour solicitation for our addr */ 3643 /* send a neighbour solicitation for our addr */
3644 addrconf_addr_solict_mult(&ifp->addr, &mcaddr); 3644 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3645 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL); 3645 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any);
3646out: 3646out:
3647 in6_ifa_put(ifp); 3647 in6_ifa_put(ifp);
3648 rtnl_unlock(); 3648 rtnl_unlock();
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 44bb66bde0e2..8ec0df75f1c4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk)
428 428
429 /* Free tx options */ 429 /* Free tx options */
430 430
431 opt = xchg(&np->opt, NULL); 431 opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
432 if (opt) 432 if (opt) {
433 sock_kfree_s(sk, opt, opt->tot_len); 433 atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
434 txopt_put(opt);
435 }
434} 436}
435EXPORT_SYMBOL_GPL(inet6_destroy_sock); 437EXPORT_SYMBOL_GPL(inet6_destroy_sock);
436 438
@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
659 fl6.fl6_sport = inet->inet_sport; 661 fl6.fl6_sport = inet->inet_sport;
660 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 662 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
661 663
662 final_p = fl6_update_dst(&fl6, np->opt, &final); 664 rcu_read_lock();
665 final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
666 &final);
667 rcu_read_unlock();
663 668
664 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 669 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
665 if (IS_ERR(dst)) { 670 if (IS_ERR(dst)) {
@@ -668,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
668 return PTR_ERR(dst); 673 return PTR_ERR(dst);
669 } 674 }
670 675
671 __ip6_dst_store(sk, dst, NULL, NULL); 676 ip6_dst_store(sk, dst, NULL, NULL);
672 } 677 }
673 678
674 return 0; 679 return 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index d70b0238f468..517c55b01ba8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,8 +167,10 @@ ipv4_connected:
167 167
168 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 168 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
169 169
170 opt = flowlabel ? flowlabel->opt : np->opt; 170 rcu_read_lock();
171 opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
171 final_p = fl6_update_dst(&fl6, opt, &final); 172 final_p = fl6_update_dst(&fl6, opt, &final);
173 rcu_read_unlock();
172 174
173 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 175 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
174 err = 0; 176 err = 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ce203b0402be..ea7c4d64a00a 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
727 *((char **)&opt2->dst1opt) += dif; 727 *((char **)&opt2->dst1opt) += dif;
728 if (opt2->srcrt) 728 if (opt2->srcrt)
729 *((char **)&opt2->srcrt) += dif; 729 *((char **)&opt2->srcrt) += dif;
730 atomic_set(&opt2->refcnt, 1);
730 } 731 }
731 return opt2; 732 return opt2;
732} 733}
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
790 return ERR_PTR(-ENOBUFS); 791 return ERR_PTR(-ENOBUFS);
791 792
792 memset(opt2, 0, tot_len); 793 memset(opt2, 0, tot_len);
793 794 atomic_set(&opt2->refcnt, 1);
794 opt2->tot_len = tot_len; 795 opt2->tot_len = tot_len;
795 p = (char *)(opt2 + 1); 796 p = (char *)(opt2 + 1);
796 797
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 36c5a98b0472..0a37ddc7af51 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
834 security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 834 security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
835} 835}
836 836
837/*
838 * Special lock-class for __icmpv6_sk:
839 */
840static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
841
842static int __net_init icmpv6_sk_init(struct net *net) 837static int __net_init icmpv6_sk_init(struct net *net)
843{ 838{
844 struct sock *sk; 839 struct sock *sk;
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net)
860 855
861 net->ipv6.icmp_sk[i] = sk; 856 net->ipv6.icmp_sk[i] = sk;
862 857
863 /*
864 * Split off their lock-class, because sk->sk_dst_lock
865 * gets used from softirqs, which is safe for
866 * __icmpv6_sk (because those never get directly used
867 * via userspace syscalls), but unsafe for normal sockets.
868 */
869 lockdep_set_class(&sk->sk_dst_lock,
870 &icmpv6_socket_sk_dst_lock_key);
871
872 /* Enough space for 2 64K ICMP packets, including 858 /* Enough space for 2 64K ICMP packets, including
873 * sk_buff struct overhead. 859 * sk_buff struct overhead.
874 */ 860 */
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 5d1c7cee2cb2..a7ca2cde2ecb 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 	memset(fl6, 0, sizeof(*fl6));
 	fl6->flowi6_proto = proto;
 	fl6->daddr = ireq->ir_v6_rmt_addr;
-	final_p = fl6_update_dst(fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
 	fl6->saddr = ireq->ir_v6_loc_addr;
 	fl6->flowi6_oif = ireq->ir_iif;
 	fl6->flowi6_mark = ireq->ir_mark;
@@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
 static inline
-void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
-			   const struct in6_addr *daddr,
-			   const struct in6_addr *saddr)
-{
-	__ip6_dst_store(sk, dst, daddr, saddr);
-}
-
-static inline
 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 {
 	return __sk_dst_check(sk, cookie);
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
 	fl6->fl6_dport = inet->inet_dport;
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
-	final_p = fl6_update_dst(fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
 
 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
 	if (!dst) {
 		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
 
 		if (!IS_ERR(dst))
-			__inet6_csk_dst_store(sk, dst, NULL, NULL);
+			ip6_dst_store(sk, dst, NULL, NULL);
 	}
 	return dst;
 }
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
 	/* Restore final destination back after routing done */
 	fl6.daddr = sk->sk_v6_daddr;
 
-	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+		       np->tclass);
 	rcu_read_unlock();
 	return res;
 }
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eabffbb89795..137fca42aaa6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
 	int i;
 
 	for_each_possible_cpu(i)
-		ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
+		ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
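
Note on the ip6_tnl_dst_reset() fix above: raw_cpu_ptr() always resolves to the copy belonging to the CPU executing the code, so the old loop cleared the local CPU's cache slot once per possible CPU and left every other CPU's slot stale, while per_cpu_ptr(ptr, i) addresses CPU i's copy explicitly. A minimal sketch of the difference, using a hypothetical per-cpu counter rather than the tunnel's dst cache:

	static DEFINE_PER_CPU(unsigned long, hits);	/* hypothetical counter */

	static void reset_hits_everywhere(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			*per_cpu_ptr(&hits, cpu) = 0;	/* CPU 'cpu's copy */
	}

	static void reset_hits_local_only(void)		/* the buggy shape */
	{
		int cpu;

		for_each_possible_cpu(cpu)
			*raw_cpu_ptr(&hits) = 0;	/* always the running CPU's copy */
	}
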
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index ad19136086dd..a10e77103c88 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
 			      int cmd);
 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 			       struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr6_table *mrt);
+static void mroute_clean_tables(struct mr6_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 static void ip6mr_free_table(struct mr6_table *mrt)
 {
 	del_timer_sync(&mrt->ipmr_expire_timer);
-	mroute_clean_tables(mrt);
+	mroute_clean_tables(mrt, true);
 	kfree(mrt);
 }
 
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
 	return dev;
 
 failure:
-	/* allow the register to be completed before unregistering. */
-	rtnl_unlock();
-	rtnl_lock();
-
 	unregister_netdevice(dev);
 	return NULL;
 }
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
  *	Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct mr6_table *mrt)
+static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 {
 	int i;
 	LIST_HEAD(list);
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
 	 *	Shut down all active vif entries
 	 */
 	for (i = 0; i < mrt->maxvif; i++) {
-		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
-			mif6_delete(mrt, i, &list);
+		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+			continue;
+		mif6_delete(mrt, i, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
 	 */
 	for (i = 0; i < MFC6_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
-			if (c->mfc_flags & MFC_STATIC)
+			if (!all && (c->mfc_flags & MFC_STATIC))
 				continue;
 			write_lock_bh(&mrt_lock);
 			list_del(&c->list);
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk)
 							net->ipv6.devconf_all);
 			write_unlock_bh(&mrt_lock);
 
-			mroute_clean_tables(mrt);
+			mroute_clean_tables(mrt, false);
 			err = 0;
 			break;
 		}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63e6956917c9..4449ad1f8114 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
 			icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
 		}
 	}
-	opt = xchg(&inet6_sk(sk)->opt, opt);
+	opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
+		   opt);
 	sk_dst_reset(sk);
 
 	return opt;
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 			sk->sk_socket->ops = &inet_dgram_ops;
 			sk->sk_family = PF_INET;
 		}
-		opt = xchg(&np->opt, NULL);
-		if (opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
+		opt = xchg((__force struct ipv6_txoptions **)&np->opt,
+			   NULL);
+		if (opt) {
+			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+			txopt_put(opt);
+		}
 		pktopt = xchg(&np->pktoptions, NULL);
 		kfree_skb(pktopt);
 
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
 			break;
 
-		opt = ipv6_renew_options(sk, np->opt, optname,
+		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		opt = ipv6_renew_options(sk, opt, optname,
 					 (struct ipv6_opt_hdr __user *)optval,
 					 optlen);
 		if (IS_ERR(opt)) {
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		retv = 0;
 		opt = ipv6_update_options(sk, opt);
sticky_done:
-		if (opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
+		if (opt) {
+			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+			txopt_put(opt);
+		}
 		break;
 	}
 
@@ -486,6 +493,7 @@ sticky_done:
 			break;
 
 		memset(opt, 0, sizeof(*opt));
+		atomic_set(&opt->refcnt, 1);
 		opt->tot_len = sizeof(*opt) + optlen;
 		retv = -EFAULT;
 		if (copy_from_user(opt+1, optval, optlen))
@@ -502,8 +510,10 @@ update:
 		retv = 0;
 		opt = ipv6_update_options(sk, opt);
done:
-		if (opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
+		if (opt) {
+			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+			txopt_put(opt);
+		}
 		break;
 	}
 	case IPV6_UNICAST_HOPS:
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 	case IPV6_RTHDR:
 	case IPV6_DSTOPTS:
 	{
+		struct ipv6_txoptions *opt;
 
 		lock_sock(sk);
-		len = ipv6_getsockopt_sticky(sk, np->opt,
-					     optname, optval, len);
+		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
 		release_sock(sk);
 		/* check if ipv6_getsockopt_sticky() returns err code */
 		if (len < 0)
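
The atomic_set(&opt->refcnt, 1) initializations and the txopt_get()/txopt_put() calls in the hunks above and below rely on helpers this series adds in include/net/ipv6.h, which are not part of the hunks shown here. Reconstructed from the call sites, they plausibly look like the sketch below; treat the exact bodies as an assumption:

	static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
	{
		struct ipv6_txoptions *opt;

		rcu_read_lock();
		opt = rcu_dereference(np->opt);
		if (opt && !atomic_inc_not_zero(&opt->refcnt))
			opt = NULL;	/* lost the race against the final put */
		rcu_read_unlock();
		return opt;
	}

	static inline void txopt_put(struct ipv6_txoptions *opt)
	{
		if (opt && atomic_dec_and_test(&opt->refcnt))
			kfree_rcu(opt, rcu);	/* free after a grace period */
	}

A sender thus takes a counted reference under RCU instead of reading np->opt unlocked, and drops it with txopt_put() once the packet has been built.
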
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 124338a39e29..5ee56d0a8699 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1651,7 +1651,6 @@ out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
 	} else {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 	}
@@ -2015,7 +2014,6 @@ out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
 	} else
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3e0f855e1bea..d6161e1c48c8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 }
 
 void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr,
-		   struct sk_buff *oskb)
+		   const struct in6_addr *daddr, const struct in6_addr *saddr)
 {
 	struct sk_buff *skb;
 	struct in6_addr addr_buf;
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
 		ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
 				       dev->dev_addr);
 
-	if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
-		skb_dst_copy(skb, oskb);
-
 	ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
 				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
 				  __func__, target);
 		}
-		ndisc_send_ns(dev, target, target, saddr, skb);
+		ndisc_send_ns(dev, target, target, saddr);
 	} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
 		neigh_app_ns(neigh);
 	} else {
 		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(dev, target, &mcaddr, saddr, skb);
+		ndisc_send_ns(dev, target, &mcaddr, saddr);
 	}
 }
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index d5efeb87350e..bab4441ed4e4 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
 /* Creation primitives. */
 static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 					 u32 user, struct in6_addr *src,
-					 struct in6_addr *dst, u8 ecn)
+					 struct in6_addr *dst, int iif, u8 ecn)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 	arg.user = user;
 	arg.src = src;
 	arg.dst = dst;
+	arg.iif = iif;
 	arg.ecn = ecn;
 
 	local_bh_disable();
@@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
 	fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
-		     ip6_frag_ecn(hdr));
+		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq == NULL) {
 		pr_debug("Can't find and can't create new queue\n");
 		goto ret_orig;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dc65ec198f7c..99140986e887 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
 
 static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
+	struct ipv6_txoptions *opt_to_free = NULL;
 	struct ipv6_txoptions opt_space;
 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 	struct in6_addr *daddr, *final_p, final;
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		if (!(opt->opt_nflen|opt->opt_flen))
 			opt = NULL;
 	}
-	if (!opt)
-		opt = np->opt;
+	if (!opt) {
+		opt = txopt_get(np);
+		opt_to_free = opt;
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -906,6 +909,7 @@ done:
 	dst_release(dst);
out:
 	fl6_sock_release(flowlabel);
+	txopt_put(opt_to_free);
 	return err < 0 ? err : len;
do_confirm:
 	dst_confirm(dst);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 44e21a03cfc3..45f5ae51de65 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
 	return	fq->id == arg->id &&
 		fq->user == arg->user &&
 		ipv6_addr_equal(&fq->saddr, arg->src) &&
-		ipv6_addr_equal(&fq->daddr, arg->dst);
+		ipv6_addr_equal(&fq->daddr, arg->dst) &&
+		(arg->iif == fq->iif ||
+		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+					       IPV6_ADDR_LINKLOCAL)));
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
 
 static struct frag_queue *
 fq_find(struct net *net, __be32 id, const struct in6_addr *src,
-	const struct in6_addr *dst, u8 ecn)
+	const struct in6_addr *dst, int iif, u8 ecn)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
 	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
 	arg.src = src;
 	arg.dst = dst;
+	arg.iif = iif;
 	arg.ecn = ecn;
 
 	hash = inet6_hash_frag(id, src, dst);
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	}
 
 	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-		     ip6_frag_ecn(hdr));
+		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq) {
 		int ret;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c8bc9b4ac328..826e6aa44f8d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -404,6 +404,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 	}
 }
 
+static bool __rt6_check_expired(const struct rt6_info *rt)
+{
+	if (rt->rt6i_flags & RTF_EXPIRES)
+		return time_after(jiffies, rt->dst.expires);
+	else
+		return false;
+}
+
 static bool rt6_check_expired(const struct rt6_info *rt)
 {
 	if (rt->rt6i_flags & RTF_EXPIRES) {
@@ -515,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w)
 		container_of(w, struct __rt6_probe_work, work);
 
 	addrconf_addr_solict_mult(&work->target, &mcaddr);
-	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
+	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
 	dev_put(work->dev);
 	kfree(work);
 }
@@ -1252,7 +1260,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
 
 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
 {
-	if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+	if (!__rt6_check_expired(rt) &&
+	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
 	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
 		return &rt->dst;
 	else
@@ -1272,7 +1281,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
 	rt6_dst_from_metrics_check(rt);
 
-	if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
+	if (rt->rt6i_flags & RTF_PCPU ||
+	    (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
 		return rt6_dst_from_check(rt, cookie);
 	else
 		return rt6_check(rt, cookie);
@@ -1322,6 +1332,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
 		rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
 }
 
+static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
+{
+	return !(rt->rt6i_flags & RTF_CACHE) &&
+		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+}
+
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 				 const struct ipv6hdr *iph, u32 mtu)
 {
@@ -1335,7 +1351,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 	if (mtu >= dst_mtu(dst))
 		return;
 
-	if (rt6->rt6i_flags & RTF_CACHE) {
+	if (!rt6_cache_allowed_for_pmtu(rt6)) {
 		rt6_do_update_pmtu(rt6, mtu);
 	} else {
 		const struct in6_addr *daddr, *saddr;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb8f2fa1c7fb..eaf7ac496d50 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_TCP;
 	fl6.daddr = ireq->ir_v6_rmt_addr;
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
+	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
 	fl6.saddr = ireq->ir_v6_loc_addr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = ireq->ir_mark;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5baa8e754e41..e7aab561b7b4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p, final;
+	struct ipv6_txoptions *opt;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	int addr_type;
@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
 
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
+	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
@@ -255,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 	sk->sk_gso_type = SKB_GSO_TCPV6;
-	__ip6_dst_store(sk, dst, NULL, NULL);
+	ip6_dst_store(sk, dst, NULL, NULL);
 
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp &&
@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		tcp_fetch_timewait_stamp(sk, dst);
 
 	icsk->icsk_ext_hdr_len = 0;
-	if (np->opt)
-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
-					  np->opt->opt_nflen);
+	if (opt)
+		icsk->icsk_ext_hdr_len = opt->opt_flen +
+					 opt->opt_nflen;
 
 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 
@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+			       np->tclass);
 		err = net_xmit_eval(err);
 	}
 
@@ -972,6 +975,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *newnp;
 	const struct ipv6_pinfo *np = inet6_sk(sk);
+	struct ipv6_txoptions *opt;
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
@@ -1056,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	 */
 
 	newsk->sk_gso_type = SKB_GSO_TCPV6;
-	__ip6_dst_store(newsk, dst, NULL, NULL);
+	ip6_dst_store(newsk, dst, NULL, NULL);
 	inet6_sk_rx_dst_set(newsk, skb);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1098,13 +1102,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	   but we make one more one thing there: reattach optmem
 	   to newsk.
 	 */
-	if (np->opt)
-		newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+	opt = rcu_dereference(np->opt);
+	if (opt) {
+		opt = ipv6_dup_options(newsk, opt);
+		RCU_INIT_POINTER(newnp->opt, opt);
+	}
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
-	if (newnp->opt)
-		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
-						     newnp->opt->opt_flen);
+	if (opt)
+		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+						    opt->opt_flen;
 
 	tcp_ca_openreq_child(newsk, dst);
 
@@ -1690,6 +1696,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
+	int rx_queue;
+	int state;
 
 	dest = &sp->sk_v6_daddr;
 	src = &sp->sk_v6_rcv_saddr;
@@ -1710,6 +1718,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		timer_expires = jiffies;
 	}
 
+	state = sk_state_load(sp);
+	if (state == TCP_LISTEN)
+		rx_queue = sp->sk_ack_backlog;
+	else
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
+		 */
+		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1718,9 +1735,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
-		   tp->write_seq-tp->snd_una,
-		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
+		   state,
+		   tp->write_seq - tp->snd_una,
+		   rx_queue,
 		   timer_active,
 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
 		   icsk->icsk_retransmits,
@@ -1732,7 +1749,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
-		   sp->sk_state == TCP_LISTEN ?
+		   state == TCP_LISTEN ?
 		   fastopenq->max_qlen :
 		   (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
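
The get_tcp6_sock() change above samples sk->sk_state once through sk_state_load() so that the printed state, the receive-queue figure and the fastopen field all agree even though /proc readers do not take the socket lock. sk_state_load() and its store-side pair are added elsewhere in this series (include/net/sock.h); a sketch of their likely shape, stated as an assumption:

	static inline int sk_state_load(const struct sock *sk)
	{
		/* acquire; pairs with the release in sk_state_store() */
		return smp_load_acquire(&sk->sk_state);
	}

	static inline void sk_state_store(struct sock *sk, int newstate)
	{
		smp_store_release(&sk->sk_state, newstate);
	}
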
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 01bcb49619ee..9da3287a3923 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 	struct in6_addr *daddr, *final_p, final;
 	struct ipv6_txoptions *opt = NULL;
+	struct ipv6_txoptions *opt_to_free = NULL;
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
@@ -1263,8 +1264,10 @@ do_udp_sendmsg:
 		opt = NULL;
 		connected = 0;
 	}
-	if (!opt)
-		opt = np->opt;
+	if (!opt) {
+		opt = txopt_get(np);
+		opt_to_free = opt;
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -1373,6 +1376,7 @@ release_dst:
out:
 	dst_release(dst);
 	fl6_sock_release(flowlabel);
+	txopt_put(opt_to_free);
 	if (!err)
 		return len;
 	/*
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fcb2752419c6..435608c4306d 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 	if (sock_writeable(sk) && iucv_below_msglim(sk))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
-		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	return mask;
 }
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index aca38d8aed8e..a2c8747d2936 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
 	struct in6_addr *daddr, *final_p, final;
 	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct ipv6_txoptions *opt_to_free = NULL;
 	struct ipv6_txoptions *opt = NULL;
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct dst_entry *dst = NULL;
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		opt = NULL;
 	}
 
-	if (opt == NULL)
-		opt = np->opt;
+	if (!opt) {
+		opt = txopt_get(np);
+		opt_to_free = opt;
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -631,6 +634,7 @@ done:
 	dst_release(dst);
out:
 	fl6_sock_release(flowlabel);
+	txopt_put(opt_to_free);
 
 	return err < 0 ? err : len;
 
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index a758eb84e8f0..ff757181b0a8 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 	/* send AddBA request */
 	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
 				     tid_tx->dialog_token, start_seq_num,
-				     local->hw.max_tx_aggregation_subframes,
+				     IEEE80211_MAX_AMPDU_BUF,
 				     tid_tx->timeout);
 }
 
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c2bd1b6a6922..da471eef07bb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3454,8 +3454,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			goto out_unlock;
 		}
 	} else {
-		/* for cookie below */
-		ack_skb = skb;
+		/* Assign a dummy non-zero cookie, it's not sent to
+		 * userspace in this case but we rely on its value
+		 * internally in the need_offchan case to distinguish
+		 * mgmt-tx from remain-on-channel.
+		 */
+		*cookie = 0xffffffff;
 	}
 
 	if (!need_offchan) {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d0dc1bfaeec2..c9e325d2e120 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
 void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
 			      bool update_bss)
 {
-	if (__ieee80211_recalc_txpower(sdata) || update_bss)
+	if (__ieee80211_recalc_txpower(sdata) ||
+	    (update_bss && ieee80211_sdata_running(sdata)))
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 		unregister_netdevice(sdata->dev);
 	} else {
 		cfg80211_unregister_wdev(&sdata->wdev);
+		ieee80211_teardown_sdata(sdata);
 		kfree(sdata);
 	}
 }
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
 	if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
 		return;
 	ieee80211_do_stop(sdata, true);
-	ieee80211_teardown_sdata(sdata);
 }
 
 void ieee80211_remove_interfaces(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 858f6b1cb149..175ffcf7fb06 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 			   NL80211_FEATURE_HT_IBSS |
 			   NL80211_FEATURE_VIF_TXPOWER |
 			   NL80211_FEATURE_MAC_ON_CREATE |
-			   NL80211_FEATURE_USERSPACE_MPM |
-			   NL80211_FEATURE_FULL_AP_CLIENT_STATE;
+			   NL80211_FEATURE_USERSPACE_MPM;
 
 	if (!ops->hw_scan)
 		wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b890e225a8f1..b3b44a5dd375 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta)
 static void mesh_path_node_reclaim(struct rcu_head *rp)
 {
 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
 	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.mesh.mpaths);
 	kfree(node->mpath);
 	kfree(node);
 }
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
 /* needs to be called with the corresponding hashwlock taken */
 static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 {
-	struct mesh_path *mpath;
-	mpath = node->mpath;
+	struct mesh_path *mpath = node->mpath;
+	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
 	spin_lock(&mpath->state_lock);
 	mpath->flags |= MESH_PATH_RESOLVING;
 	if (mpath->is_gate)
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 	hlist_del_rcu(&node->list);
 	call_rcu(&node->rcu, mesh_path_node_reclaim);
 	spin_unlock(&mpath->state_lock);
+	atomic_dec(&sdata->u.mesh.mpaths);
 	atomic_dec(&tbl->entries);
 }
 
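
The mesh_pathtbl.c fix above moves the sdata->u.mesh.mpaths decrement out of the RCU callback and into __mesh_path_del(), which runs synchronously while the interface data is still guaranteed to be alive; by the time a call_rcu() callback fires after a grace period, the parent object it would dereference may already have been torn down. The general shape of the pattern, with hypothetical entry/table names:

	#include <linux/rculist.h>
	#include <linux/slab.h>

	struct entry {
		struct hlist_node list;
		struct rcu_head rcu;
	};

	struct table {
		atomic_t nentries;
	};

	static void entry_reclaim(struct rcu_head *rp)
	{
		struct entry *e = container_of(rp, struct entry, rcu);

		kfree(e);	/* memory release only; no parent dereferences */
	}

	static void entry_del(struct table *tbl, struct entry *e)
	{
		hlist_del_rcu(&e->list);	  /* unpublish from readers */
		atomic_dec(&tbl->nentries);	  /* account while tbl is live */
		call_rcu(&e->rcu, entry_reclaim); /* defer only the kfree() */
	}
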
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4aeca4b0c3cb..a413e52f7691 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 	/* We need to ensure power level is at max for scanning. */
 	ieee80211_hw_config(local, 0);
 
-	if ((req->channels[0]->flags &
-	     IEEE80211_CHAN_NO_IR) ||
+	if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
+					IEEE80211_CHAN_RADAR)) ||
 	    !req->n_ssids) {
 		next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
 	} else {
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
 	 * TODO: channel switching also consumes quite some time,
 	 * add that delay as well to get a better estimation
 	 */
-	if (chan->flags & IEEE80211_CHAN_NO_IR)
+	if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
 		return IEEE80211_PASSIVE_CHANNEL_TIME;
 	return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
 }
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
 	 *
 	 * In any case, it is not necessary for a passive scan.
 	 */
-	if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) {
+	if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
+	    !scan_req->n_ssids) {
 		*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
 		local->next_scan_state = SCAN_DECISION;
 		return;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e22349ea7256..4692782b5280 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE
 	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
 	select NF_DUP_IPV4
-	select NF_DUP_IPV6 if IP6_NF_IPTABLES
+	select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
 	---help---
 	This option adds a "TEE" target with which a packet can be cloned and
 	this clone be rerouted to another nexthop.
@@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY
 	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
-	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
 	help
 	  This option adds a `TPROXY' target, which is somewhat similar to
 	  REDIRECT. It can only be used in the mangle table and is useful
@@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET
 	depends on IPV6 || IPV6=n
 	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	select NF_DEFRAG_IPV4
-	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
 	help
 	  This option adds a `socket' match, which can be used to match
 	  packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index d05e759ed0fa..b0bc475f641e 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -33,7 +33,7 @@
 #define mtype_gc	IPSET_TOKEN(MTYPE, _gc)
 #define mtype		MTYPE
 
-#define get_ext(set, map, id)	((map)->extensions + (set)->dsize * (id))
+#define get_ext(set, map, id)	((map)->extensions + ((set)->dsize * (id)))
 
 static void
 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set)
 		del_timer_sync(&map->gc);
 
 	ip_set_free(map->members);
-	if (set->dsize) {
-		if (set->extensions & IPSET_EXT_DESTROY)
-			mtype_ext_cleanup(set);
-		ip_set_free(map->extensions);
-	}
-	kfree(map);
+	if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
+		mtype_ext_cleanup(set);
+	ip_set_free(map);
 
 	set->data = NULL;
 }
@@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
 	const struct mtype *map = set->data;
 	struct nlattr *nested;
+	size_t memsize = sizeof(*map) + map->memsize;
 
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
 	if (mtype_do_head(skb, map) ||
 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
-	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-			  htonl(sizeof(*map) +
-				map->memsize +
-				set->dsize * map->elements)))
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
 		goto nla_put_failure;
 	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 64a564334418..4783efff0bde 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip");
 /* Type structure */
 struct bitmap_ip {
 	void *members;		/* the set members */
-	void *extensions;	/* data extensions */
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
@@ -49,6 +48,8 @@ struct bitmap_ip {
 	size_t memsize;		/* members size */
 	u8 netmask;		/* subnet netmask */
 	struct timer_list gc;	/* garbage collection */
+	unsigned char extensions[0]	/* data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
@@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
@@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 	pr_debug("hosts %u, elements %llu\n",
 		 hosts, (unsigned long long)elements);
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	set->dsize = ip_set_elem_len(set, tb, 0, 0);
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ip;
-	set->dsize = ip_set_elem_len(set, tb, 0);
 	if (!init_map_ip(set, map, first_ip, last_ip,
 			 elements, hosts, netmask)) {
 		kfree(map);
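
The bitmap_ip change above, and the bitmap_ipmac and bitmap_port changes that follow, all apply the same conversion: the separately allocated extensions pointer becomes a u64-aligned flexible array at the end of the map structure, so a single ip_set_alloc() covers the header plus elements * set->dsize bytes of per-element extension space, and the error unwinding for a second allocation disappears. A simplified sketch of the layout (abridged struct, hypothetical helper name):

	struct bitmap_map {				/* abridged */
		void *members;				/* the set members */
		u32 elements;				/* max elements in the set */
		unsigned char extensions[0]		/* per-element extensions */
			__aligned(__alignof__(u64));
	};

	static struct bitmap_map *bitmap_map_alloc(const struct ip_set *set,
						   u32 elements)
	{
		/* header and extension area in one allocation */
		return ip_set_alloc(sizeof(struct bitmap_map) +
				    elements * set->dsize);
	}
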
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 1430535118fb..29dde208381d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -47,24 +47,26 @@ enum {
 /* Type structure */
 struct bitmap_ipmac {
 	void *members;		/* the set members */
-	void *extensions;	/* MAC + data extensions */
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
 	size_t memsize;		/* members size */
 	struct timer_list gc;	/* garbage collector */
+	unsigned char extensions[0]	/* MAC + data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
 struct bitmap_ipmac_adt_elem {
+	unsigned char ether[ETH_ALEN] __aligned(2);
 	u16 id;
-	unsigned char *ether;
+	u16 add_mac;
 };
 
 struct bitmap_ipmac_elem {
 	unsigned char ether[ETH_ALEN];
 	unsigned char filled;
-} __attribute__ ((aligned));
+} __aligned(__alignof__(u64));
 
 static inline u32
 ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
 	return ip - m->first_ip;
 }
 
-static inline struct bitmap_ipmac_elem *
-get_elem(void *extensions, u16 id, size_t dsize)
-{
-	return (struct bitmap_ipmac_elem *)(extensions + id * dsize);
-}
+#define get_elem(extensions, id, dsize)		\
+	(struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
+
+#define get_const_elem(extensions, id, dsize)	\
+	(const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
 
 /* Common functions */
 
@@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
 
 	if (!test_bit(e->id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, e->id, dsize);
-	if (elem->filled == MAC_FILLED)
-		return !e->ether ||
-		       ether_addr_equal(e->ether, elem->ether);
+	elem = get_const_elem(map->extensions, e->id, dsize);
+	if (e->add_mac && elem->filled == MAC_FILLED)
+		return ether_addr_equal(e->ether, elem->ether);
 	/* Trigger kernel to fill out the ethernet address */
 	return -EAGAIN;
 }
@@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
 
 	if (!test_bit(id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, id, dsize);
+	elem = get_const_elem(map->extensions, id, dsize);
 	/* Timer not started for the incomplete elements */
 	return elem->filled == MAC_FILLED;
 }
@@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
 	 * and we can reuse it later when MAC is filled out,
 	 * possibly by the kernel
 	 */
-	if (e->ether)
+	if (e->add_mac)
 		ip_set_timeout_set(timeout, t);
 	else
 		*timeout = t;
@@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
 	elem = get_elem(map->extensions, e->id, dsize);
 	if (test_bit(e->id, map->members)) {
 		if (elem->filled == MAC_FILLED) {
-			if (e->ether &&
+			if (e->add_mac &&
 			    (flags & IPSET_FLAG_EXIST) &&
 			    !ether_addr_equal(e->ether, elem->ether)) {
 				/* memcpy isn't atomic */
@@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
 				ether_addr_copy(elem->ether, e->ether);
 			}
 			return IPSET_ADD_FAILED;
-		} else if (!e->ether)
+		} else if (!e->add_mac)
 			/* Already added without ethernet address */
 			return IPSET_ADD_FAILED;
 		/* Fill the MAC address and trigger the timer activation */
@@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
 		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
 		return IPSET_ADD_START_STORED_TIMEOUT;
-	} else if (e->ether) {
+	} else if (e->add_mac) {
 		/* We can store MAC too */
 		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
@@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
 		     u32 id, size_t dsize)
 {
 	const struct bitmap_ipmac_elem *elem =
-		get_elem(map->extensions, id, dsize);
+		get_const_elem(map->extensions, id, dsize);
 
 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
 			       htonl(map->first_ip + id)) ||
@@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 {
 	struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct bitmap_ipmac_adt_elem e = { .id = 0 };
+	struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
@@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 		return -EINVAL;
 
 	e.id = ip_to_id(map, ip);
-	e.ether = eth_hdr(skb)->h_source;
+	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
 
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
@@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
 		return -IPSET_ERR_BITMAP_RANGE;
 
 	e.id = ip_to_id(map, ip);
-	if (tb[IPSET_ATTR_ETHER])
-		e.ether = nla_data(tb[IPSET_ATTR_ETHER]);
-	else
-		e.ether = NULL;
-
+	if (tb[IPSET_ATTR_ETHER]) {
+		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
+		e.add_mac = 1;
+	}
 	ret = adtfn(set, &e, &ext, &ext, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
@@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
 		return -IPSET_ERR_BITMAP_RANGE_SIZE;
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	set->dsize = ip_set_elem_len(set, tb,
+				     sizeof(struct bitmap_ipmac_elem),
+				     __alignof__(struct bitmap_ipmac_elem));
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ipmac;
-	set->dsize = ip_set_elem_len(set, tb,
-				     sizeof(struct bitmap_ipmac_elem));
 	if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
 		kfree(map);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 5338ccd5da46..7f0c733358a4 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port");
 /* Type structure */
 struct bitmap_port {
	void *members;		/* the set members */
-	void *extensions;	/* data extensions */
	u16 first_port;		/* host byte order, included in range */
	u16 last_port;		/* host byte order, included in range */
	u32 elements;		/* number of max elements in the set */
	size_t memsize;		/* members size */
	struct timer_list gc;	/* garbage collection */
+	unsigned char extensions[0]	/* data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
@@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * map->elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
	map->first_port = first_port;
	map->last_port = last_port;
	set->timeout = IPSET_NO_TIMEOUT;
@@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 {
	struct bitmap_port *map;
	u16 first_port, last_port;
+	u32 elements;
 
	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
		last_port = tmp;
	}
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	elements = last_port - first_port + 1;
+	set->dsize = ip_set_elem_len(set, tb, 0, 0);
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
	if (!map)
		return -ENOMEM;
 
-	map->elements = last_port - first_port + 1;
+	map->elements = elements;
	map->memsize = bitmap_bytes(0, map->elements);
	set->variant = &bitmap_port;
-	set->dsize = ip_set_elem_len(set, tb, 0);
	if (!init_map_port(set, map, first_port, last_port)) {
		kfree(map);
		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 69ab9c2634e1..54f3d7cb23e6 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
 }
 
 size_t
-ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
+		size_t align)
 {
	enum ip_set_ext_id id;
-	size_t offset = len;
	u32 cadt_flags = 0;
 
	if (tb[IPSET_ATTR_CADT_FLAGS])
		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
	if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
		set->flags |= IPSET_CREATE_FLAG_FORCEADD;
+	if (!align)
+		align = 1;
	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
		if (!add_extension(id, cadt_flags, tb))
			continue;
-		offset = ALIGN(offset, ip_set_extensions[id].align);
-		set->offset[id] = offset;
+		len = ALIGN(len, ip_set_extensions[id].align);
+		set->offset[id] = len;
		set->extensions |= ip_set_extensions[id].type;
-		offset += ip_set_extensions[id].len;
+		len += ip_set_extensions[id].len;
	}
-	return offset;
+	return ALIGN(len, align);
 }
 EXPORT_SYMBOL_GPL(ip_set_elem_len);
 
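The ipset hunks above move the per-element extension area from a separately allocated buffer into a trailing flexible array, so ip_set_elem_len() now both ALIGN()s each extension offset and rounds the final element size up to the element type's alignment. A minimal userspace sketch of that offset arithmetic (plain C, not kernel code; all names and the example sizes are invented for illustration):

	/* Compile with: cc -std=c99 ext_layout.c */
	#include <stdio.h>
	#include <stddef.h>

	/* same rounding as the kernel's ALIGN() for power-of-two alignments */
	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

	struct ext_desc {
		const char *name;
		size_t len;
		size_t align;
	};

	int main(void)
	{
		/* pretend the element body is 6 bytes, e.g. a MAC address */
		size_t len = 6;
		const struct ext_desc exts[] = {
			{ "timeout", 4, 4 },
			{ "counter", 16, 8 },
		};

		for (size_t i = 0; i < sizeof(exts) / sizeof(exts[0]); i++) {
			/* align the running length; that is this ext's offset */
			len = ALIGN_UP(len, exts[i].align);
			printf("%s at offset %zu\n", exts[i].name, len);
			len += exts[i].len;
		}
		/* round the total up to the element alignment, as the new
		 * ip_set_elem_len() does with its align argument
		 */
		printf("element size: %zu\n", ALIGN_UP(len, 8));
		return 0;
	}

With the example inputs this prints offsets 8 and 16 and an element size of 32, which is why elements of one set can simply be indexed as id * set->dsize inside the trailing array.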
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 691b54fcaf2a..e5336ab36d67 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -72,8 +72,9 @@ struct hbucket {
	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
	u8 size;		/* size of the array */
	u8 pos;			/* position of the first free entry */
-	unsigned char value[0];	/* the array of the values */
-} __attribute__ ((aligned));
+	unsigned char value[0]	/* the array of the values */
+		__aligned(__alignof__(u64));
+};
 
 /* The hash table: the table size stored here in order to make resizing easy */
 struct htable {
@@ -475,7 +476,7 @@ static void
 mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 {
	struct htable *t;
-	struct hbucket *n;
+	struct hbucket *n, *tmp;
	struct mtype_elem *data;
	u32 i, j, d;
 #ifdef IP_SET_HASH_WITH_NETS
@@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
			}
		}
		if (d >= AHASH_INIT_SIZE) {
-			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
-					(n->size - AHASH_INIT_SIZE) * dsize,
-					GFP_ATOMIC);
+			if (d >= n->size) {
+				rcu_assign_pointer(hbucket(t, i), NULL);
+				kfree_rcu(n, rcu);
+				continue;
+			}
+			tmp = kzalloc(sizeof(*tmp) +
+				      (n->size - AHASH_INIT_SIZE) * dsize,
+				      GFP_ATOMIC);
			if (!tmp)
				/* Still try to delete expired elements */
				continue;
@@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
				continue;
			data = ahash_data(n, j, dsize);
			memcpy(tmp->value + d * dsize, data, dsize);
-			set_bit(j, tmp->used);
+			set_bit(d, tmp->used);
			d++;
		}
		tmp->pos = d;
@@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
 #endif
		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
		set->dsize = ip_set_elem_len(set, tb,
-			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
+			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
+			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
 #ifndef IP_SET_PROTO_UNDEF
	} else {
		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
		set->dsize = ip_set_elem_len(set, tb,
-			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
+			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
	}
 #endif
	if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a30ce6e8c90..bbede95c9f68 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -31,7 +31,7 @@ struct set_elem {
	struct rcu_head rcu;
	struct list_head list;
	ip_set_id_t id;
-};
+} __aligned(__alignof__(u64));
 
 struct set_adt_elem {
	ip_set_id_t id;
@@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
		size = IP_SET_LIST_MIN_SIZE;
 
	set->variant = &set_variant;
-	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
+				     __alignof__(struct set_elem));
	if (!init_list_set(net, set, size))
		return -ENOMEM;
	if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1e24fff53e4b..f57b4dcdb233 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1176,6 +1176,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
	struct ip_vs_protocol *pp;
	struct ip_vs_proto_data *pd;
	struct ip_vs_conn *cp;
+	struct sock *sk;
 
	EnterFunction(11);
 
@@ -1183,13 +1184,12 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
	if (skb->ipvs_property)
		return NF_ACCEPT;
 
+	sk = skb_to_full_sk(skb);
	/* Bad... Do not break raw sockets */
-	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
+	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
		     af == AF_INET)) {
-		struct sock *sk = skb->sk;
-		struct inet_sock *inet = inet_sk(skb->sk);
 
-		if (inet && sk->sk_family == PF_INET && inet->nodefrag)
+		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
			return NF_ACCEPT;
	}
 
@@ -1681,6 +1681,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
	struct ip_vs_conn *cp;
	int ret, pkts;
	int conn_reuse_mode;
+	struct sock *sk;
 
	/* Already marked as IPVS request or reply? */
	if (skb->ipvs_property)
@@ -1708,12 +1709,11 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
	ip_vs_fill_iph_skb(af, skb, false, &iph);
 
	/* Bad... Do not break raw sockets */
-	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
+	sk = skb_to_full_sk(skb);
+	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
		     af == AF_INET)) {
-		struct sock *sk = skb->sk;
-		struct inet_sock *inet = inet_sk(skb->sk);
 
-		if (inet && sk->sk_family == PF_INET && inet->nodefrag)
+		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
			return NF_ACCEPT;
	}
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 06eb48fceb42..740cce4685ac 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
	struct net *net = sock_net(ctnl);
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int ret = 0;
-	u16 flags;
+	u16 flags = 0;
 
	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = nfmsg->nfgen_family;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1067fb4c1ffa..c7808fc19719 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr,
	local_bh_enable();
 }
 
-static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
+			      struct nft_counter *total)
 {
-	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
-	struct nft_counter_percpu *cpu_stats;
-	struct nft_counter total;
+	const struct nft_counter_percpu *cpu_stats;
	u64 bytes, packets;
	unsigned int seq;
	int cpu;
 
-	memset(&total, 0, sizeof(total));
+	memset(total, 0, sizeof(*total));
	for_each_possible_cpu(cpu) {
-		cpu_stats = per_cpu_ptr(priv->counter, cpu);
+		cpu_stats = per_cpu_ptr(counter, cpu);
		do {
			seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			bytes = cpu_stats->counter.bytes;
			packets = cpu_stats->counter.packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
 
-		total.packets += packets;
-		total.bytes += bytes;
+		total->packets += packets;
+		total->bytes += bytes;
	}
+}
+
+static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter total;
+
+	nft_counter_fetch(priv->counter, &total);
 
	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
@@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
	free_percpu(priv->counter);
 }
 
+static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
+{
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
+	struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
+	struct nft_counter_percpu __percpu *cpu_stats;
+	struct nft_counter_percpu *this_cpu;
+	struct nft_counter total;
+
+	nft_counter_fetch(priv->counter, &total);
+
+	cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
+					      GFP_ATOMIC);
+	if (cpu_stats == NULL)
+		return -ENOMEM;
+
+	preempt_disable();
+	this_cpu = this_cpu_ptr(cpu_stats);
+	this_cpu->counter.packets = total.packets;
+	this_cpu->counter.bytes = total.bytes;
+	preempt_enable();
+
+	priv_clone->counter = cpu_stats;
+	return 0;
+}
+
 static struct nft_expr_type nft_counter_type;
 static const struct nft_expr_ops nft_counter_ops = {
	.type		= &nft_counter_type,
@@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = {
	.init		= nft_counter_init,
	.destroy	= nft_counter_destroy,
	.dump		= nft_counter_dump,
+	.clone		= nft_counter_clone,
 };
 
 static struct nft_expr_type nft_counter_type __read_mostly = {
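The nft_counter hunks above split the dump path into nft_counter_fetch(), which folds the per-CPU slots into one struct nft_counter, and reuse it from the new .clone hook, which seeds a single slot of a freshly allocated per-CPU copy with that snapshot. A rough userspace analogue of that fetch/clone pattern (plain arrays stand in for per-CPU storage and the seqcount retry loop is omitted; all names are invented for the example):

	#include <stdio.h>
	#include <string.h>

	#define NCPUS 4

	struct counter { unsigned long long packets, bytes; };

	/* sum every per-CPU slot into one consistent snapshot */
	static void counter_fetch(const struct counter *percpu,
				  struct counter *total)
	{
		memset(total, 0, sizeof(*total));
		for (int cpu = 0; cpu < NCPUS; cpu++) {
			total->packets += percpu[cpu].packets;
			total->bytes += percpu[cpu].bytes;
		}
	}

	/* clone: zero the copy, then park the whole history in one slot */
	static void counter_clone(struct counter *dst, const struct counter *src)
	{
		struct counter total;

		counter_fetch(src, &total);
		memset(dst, 0, NCPUS * sizeof(*dst));
		dst[0] = total;
	}

	int main(void)
	{
		struct counter src[NCPUS] = { { 1, 100 }, { 2, 200 }, { 3, 300 } };
		struct counter dst[NCPUS], total;

		counter_clone(dst, src);
		counter_fetch(dst, &total);
		printf("packets=%llu bytes=%llu\n", total.packets, total.bytes);
		return 0;
	}

The clone reports the same totals as the original (packets=6, bytes=600) even though the per-slot distribution differs, which is all a counter expression needs.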
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 513a8ef60a59..9dec3bd1b63c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
	}
 
	ext = nft_set_elem_ext(set, elem);
-	if (priv->expr != NULL)
-		nft_expr_clone(nft_set_ext_expr(ext), priv->expr);
+	if (priv->expr != NULL &&
+	    nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
+		return NULL;
 
	return elem;
 }
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index b7de0da46acd..ecf0a0196f18 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
	if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
-		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
	pr_debug("mask 0x%x\n", mask);
 
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index a7a80a6b77b0..653d073bae45 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
		struct hlist_node *n;
 
		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
-			if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
+			if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
				continue;
 
			if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index efb736bb6855..e41cd12d9b2d 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
	.destroy	= ovs_netdev_tunnel_destroy,
	.get_options	= geneve_get_options,
	.send		= dev_queue_xmit,
-	.owner		= THIS_MODULE,
 };
 
 static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index c3257d78d3d2..7f8897f33a67 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = {
	.create		= gre_create,
	.send		= dev_queue_xmit,
	.destroy	= ovs_netdev_tunnel_destroy,
-	.owner		= THIS_MODULE,
 };
 
 static int __init ovs_gre_tnl_init(void)
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index b327368a3848..6b0190b987ec 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
		ovs_netdev_detach_dev(vport);
 
-	/* Early release so we can unregister the device */
+	/* We can be invoked by both explicit vport deletion and
+	 * underlying netdev deregistration; delete the link only
+	 * if it's not already shutting down.
+	 */
+	if (vport->dev->reg_state == NETREG_REGISTERED)
+		rtnl_delete_link(vport->dev);
	dev_put(vport->dev);
-	rtnl_delete_link(vport->dev);
	vport->dev = NULL;
	rtnl_unlock();
 
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 0ac0fd004d7e..31cbc8c5c7db 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name)
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 }
 
-int ovs_vport_ops_register(struct vport_ops *ops)
+int __ovs_vport_ops_register(struct vport_ops *ops)
 {
	int err = -EEXIST;
	struct vport_ops *o;
@@ -87,7 +87,7 @@ errout:
	ovs_unlock();
	return err;
 }
-EXPORT_SYMBOL_GPL(ovs_vport_ops_register);
+EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
 
 void ovs_vport_ops_unregister(struct vport_ops *ops)
 {
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
  *
  * @vport: vport to delete.
  *
- * Detaches @vport from its datapath and destroys it. It is possible to fail
- * for reasons such as lack of memory. ovs_mutex must be held.
+ * Detaches @vport from its datapath and destroys it. ovs_mutex must
+ * be held.
  */
 void ovs_vport_del(struct vport *vport)
 {
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bdfd82a7c064..8ea3a96980ac 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport)
	return vport->dev->name;
 }
 
-int ovs_vport_ops_register(struct vport_ops *ops);
+int __ovs_vport_ops_register(struct vport_ops *ops);
+#define ovs_vport_ops_register(ops)			\
+	({						\
+		(ops)->owner = THIS_MODULE;		\
+		__ovs_vport_ops_register(ops);		\
+	})
+
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
 static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index af399cac5205..992396aa635c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
		kfree_rcu(po->rollover, rcu);
 }
 
+static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
+					  struct sk_buff *skb)
+{
+	/* Earlier code assumed this would be a VLAN pkt, double-check
+	 * this now that we have the actual packet in hand. We can only
+	 * do this check on Ethernet devices.
+	 */
+	if (unlikely(dev->type != ARPHRD_ETHER))
+		return false;
+
+	skb_reset_mac_header(skb);
+	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
+}
+
 static const struct proto_ops packet_ops;
 
 static const struct proto_ops packet_ops_spkt;
@@ -1902,18 +1916,10 @@ retry:
		goto retry;
	}
 
-	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
-		/* Earlier code assumed this would be a VLAN pkt,
-		 * double-check this now that we have the actual
-		 * packet in hand.
-		 */
-		struct ethhdr *ehdr;
-		skb_reset_mac_header(skb);
-		ehdr = eth_hdr(skb);
-		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
-			err = -EMSGSIZE;
-			goto out_unlock;
-		}
+	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
+	    !packet_extra_vlan_len_allowed(dev, skb)) {
+		err = -EMSGSIZE;
+		goto out_unlock;
	}
 
	skb->protocol = proto;
@@ -2323,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 static bool ll_header_truncated(const struct net_device *dev, int len)
 {
	/* net device doesn't like empty head */
-	if (unlikely(len <= dev->hard_header_len)) {
-		net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
+	if (unlikely(len < dev->hard_header_len)) {
+		net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
				     current->comm, len, dev->hard_header_len);
		return true;
	}
@@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
	return false;
 }
 
+static void tpacket_set_protocol(const struct net_device *dev,
+				 struct sk_buff *skb)
+{
+	if (dev->type == ARPHRD_ETHER) {
+		skb_reset_mac_header(skb);
+		skb->protocol = eth_hdr(skb)->h_proto;
+	}
+}
+
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
			    void *frame, struct net_device *dev, int size_max,
			    __be16 proto, unsigned char *addr, int hlen)
@@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);
 
-	if (!packet_use_direct_xmit(po))
-		skb_probe_transport_header(skb, 0);
	if (unlikely(po->tp_tx_has_off)) {
		int off_min, off_max, off;
		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
			dev->hard_header_len);
		if (unlikely(err))
			return err;
+		if (!skb->protocol)
+			tpacket_set_protocol(dev, skb);
 
		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
@@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		len = ((to_write > len_max) ? len_max : to_write);
	}
 
+	skb_probe_transport_header(skb, 0);
+
	return tp_len;
 }
 
@@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;
 
-	reserve = dev->hard_header_len + VLAN_HLEN;
+	if (po->sk.sk_socket->type == SOCK_RAW)
+		reserve = dev->hard_header_len;
	size_max = po->tx_ring.frame_size
		   - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
 
-	if (size_max > dev->mtu + reserve)
-		size_max = dev->mtu + reserve;
+	if (size_max > dev->mtu + reserve + VLAN_HLEN)
+		size_max = dev->mtu + reserve + VLAN_HLEN;
 
	do {
		ph = packet_current_frame(po, &po->tx_ring,
@@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
					  addr, hlen);
		if (likely(tp_len >= 0) &&
-		    tp_len > dev->mtu + dev->hard_header_len) {
-			struct ethhdr *ehdr;
-			/* Earlier code assumed this would be a VLAN pkt,
-			 * double-check this now that we have the actual
-			 * packet in hand.
-			 */
+		    tp_len > dev->mtu + reserve &&
+		    !packet_extra_vlan_len_allowed(dev, skb))
+			tp_len = -EMSGSIZE;
 
-			skb_reset_mac_header(skb);
-			ehdr = eth_hdr(skb);
-			if (ehdr->h_proto != htons(ETH_P_8021Q))
-				tp_len = -EMSGSIZE;
-		}
		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
@@ -2765,18 +2775,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
 
-	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
-		/* Earlier code assumed this would be a VLAN pkt,
-		 * double-check this now that we have the actual
-		 * packet in hand.
-		 */
-		struct ethhdr *ehdr;
-		skb_reset_mac_header(skb);
-		ehdr = eth_hdr(skb);
-		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
-			err = -EMSGSIZE;
-			goto out_free;
-		}
+	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+	    !packet_extra_vlan_len_allowed(dev, skb)) {
+		err = -EMSGSIZE;
+		goto out_free;
	}
 
	skb->protocol = proto;
@@ -2807,8 +2809,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
		len += vnet_hdr_len;
	}
 
-	if (!packet_use_direct_xmit(po))
-		skb_probe_transport_header(skb, reserve);
+	skb_probe_transport_header(skb, reserve);
+
	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;
 
@@ -4107,7 +4109,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
	err = -EINVAL;
	if (unlikely((int)req->tp_block_size <= 0))
		goto out;
-	if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
+	if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
		goto out;
	if (po->tp_version >= TPACKET_V3 &&
	    (int)(req->tp_block_size -
@@ -4119,8 +4121,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
	if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
		goto out;
 
-	rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
-	if (unlikely(rb->frames_per_block <= 0))
+	rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
+	if (unlikely(rb->frames_per_block == 0))
		goto out;
	if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
		     req->tp_frame_nr))
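The af_packet hunks above fold three copies of the "oversized frame is acceptable only if it turned out to be VLAN-tagged Ethernet" check into packet_extra_vlan_len_allowed(). A compilable userspace sketch of roughly the policy the tpacket path ends up enforcing (constants inlined; the function and variable names are invented for the example, not kernel API):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ETH_P_8021Q 0x8100
	#define VLAN_HLEN 4

	/* a frame may exceed mtu + hard_header_len by up to the 4-byte
	 * 802.1Q tag, and only when the frame really carries that tag
	 */
	static bool frame_len_ok(size_t len, size_t mtu, size_t hdr_len,
				 uint16_t ethertype)
	{
		if (len <= mtu + hdr_len)
			return true;
		return ethertype == ETH_P_8021Q &&
		       len <= mtu + hdr_len + VLAN_HLEN;
	}

	int main(void)
	{
		printf("%d\n", frame_len_ok(1518, 1500, 14, ETH_P_8021Q)); /* 1 */
		printf("%d\n", frame_len_ok(1518, 1500, 14, 0x0800));      /* 0 */
		return 0;
	}

The plain packet_snd() path is looser (it only demands the tag be present, having already sized the allocation), but the tagged-or-reject decision is the part all three call sites now share.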
diff --git a/net/rds/connection.c b/net/rds/connection.c
index d4564036a339..e3b118cae81d 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net,
		}
	}
 
-	if (trans == NULL) {
-		kmem_cache_free(rds_conn_slab, conn);
-		conn = ERR_PTR(-ENODEV);
-		goto out;
-	}
-
	conn->c_trans = trans;
 
	ret = trans->conn_alloc(conn, gfp);
diff --git a/net/rds/send.c b/net/rds/send.c
index 827155c2ead1..c9cdb358ea88 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
		release_sock(sk);
	}
 
-	/* racing with another thread binding seems ok here */
+	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
+		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
+	release_sock(sk);
 
	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index e0547f521f20..adc555e0323d 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -723,8 +723,10 @@ process_further:
 
		if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
		     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
-		    hard > tx)
+		    hard > tx) {
+			call->acks_hard = tx;
			goto all_acked;
+		}
 
		smp_rmb();
		rxrpc_rotate_tx_window(call, hard - 1);
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index a40d3afe93b7..14c4e12c47b0 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
	/* this should be in poll */
-	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f43c8f33f09e..7ec667dd4ce1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
 }
 
 /* We know handle. Find qdisc among all qdisc's attached to device
-   (root qdisc, all its children, children of children etc.)
+ * (root qdisc, all its children, children of children etc.)
+ * Note: caller either uses rtnl or rcu_read_lock()
  */
 
 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
	    root->handle == handle)
		return root;
 
-	list_for_each_entry(q, &root->list, list) {
+	list_for_each_entry_rcu(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
		struct Qdisc *root = qdisc_dev(q)->qdisc;
 
		WARN_ON_ONCE(root == &noop_qdisc);
-		list_add_tail(&q->list, &root->list);
+		ASSERT_RTNL();
+		list_add_tail_rcu(&q->list, &root->list);
	}
 }
 EXPORT_SYMBOL(qdisc_list_add);
 
 void qdisc_list_del(struct Qdisc *q)
 {
-	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
-		list_del(&q->list);
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		ASSERT_RTNL();
+		list_del_rcu(&q->list);
+	}
 }
 EXPORT_SYMBOL(qdisc_list_del);
 
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
	if (n == 0)
		return;
	drops = max_t(int, n, 0);
+	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
-			return;
+			break;
 
+		if (sch->flags & TCQ_F_NOPARENT)
+			break;
+		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
-			WARN_ON(parentid != TC_H_ROOT);
-			return;
+			WARN_ON_ONCE(parentid != TC_H_ROOT);
+			break;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
		sch->q.qlen -= n;
		__qdisc_qstats_drop(sch, drops);
	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
-			sch->flags |= TCQ_F_ONETXQUEUE;
+			sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}
 
	sch->handle = handle;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cb5d4ad32946..e82a1ad80aa5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
		return;
	}
	if (!netif_is_multiqueue(dev))
-		qdisc->flags |= TCQ_F_ONETXQUEUE;
+		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
 }
 
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index f3cbaecd283a..3e82f047caaf 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
		if (qdisc == NULL)
			goto err;
		priv->qdiscs[ntx] = qdisc;
-		qdisc->flags |= TCQ_F_ONETXQUEUE;
+		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}
 
	sch->flags |= TCQ_F_MQROOT;
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
 
	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
-		new->flags |= TCQ_F_ONETXQUEUE;
+		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 3811a745452c..ad70ecf57ce7 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
			goto err;
		}
		priv->qdiscs[i] = qdisc;
-		qdisc->flags |= TCQ_F_ONETXQUEUE;
+		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}
 
	/* If the mqprio options indicate that hardware should own
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
	*old = dev_graft_qdisc(dev_queue, new);
 
	if (new)
-		new->flags |= TCQ_F_ONETXQUEUE;
+		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 
	if (dev->flags & IFF_UP)
		dev_activate(dev);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 4f15b7d730e1..1543e39f47c3 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
	if (!has_sha1)
		return -EINVAL;
 
-	memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
-	       hmacs->shmac_num_idents * sizeof(__u16));
+	for (i = 0; i < hmacs->shmac_num_idents; i++)
+		ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
	ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
				hmacs->shmac_num_idents * sizeof(__u16));
	return 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e917d27328ea..acb45b8c2a9d 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
	struct sock *sk = skb->sk;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &transport->fl.u.ip6;
+	int res;
 
	pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
		 skb->len, &fl6->saddr, &fl6->daddr);
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
-	return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+	rcu_read_lock();
+	res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+	rcu_read_unlock();
+	return res;
 }
 
 /* Returns the dst cache entry for the given source and destination ip
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
		pr_debug("src=%pI6 - ", &fl6->saddr);
	}
 
-	final_p = fl6_update_dst(fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
+
	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	if (!asoc || saddr)
		goto out;
@@ -321,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
	if (baddr) {
		fl6->saddr = baddr->v6.sin6_addr;
		fl6->fl6_sport = baddr->v6.sin6_port;
-		final_p = fl6_update_dst(fl6, np->opt, &final);
+		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	}
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 897c01c029ca..03c8256063ec 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
		return -EFAULT;
 
	/* Alloc space for the address array in kernel memory. */
-	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
+	kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
	if (unlikely(!kaddrs))
		return -ENOMEM;
 
@@ -4928,7 +4928,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);
 
-	addrs = kmalloc(space_left, GFP_KERNEL);
+	addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
	if (!addrs)
		return -ENOMEM;
 
@@ -6458,7 +6458,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
	if (sctp_writeable(sk)) {
		mask |= POLLOUT | POLLWRNORM;
	} else {
-		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		/*
		 * Since the socket is not locked, the buffer
		 * might be made available after the writeable check and
@@ -6801,26 +6801,30 @@ no_packet:
 static void __sctp_write_space(struct sctp_association *asoc)
 {
	struct sock *sk = asoc->base.sk;
-	struct socket *sock = sk->sk_socket;
 
-	if ((sctp_wspace(asoc) > 0) && sock) {
-		if (waitqueue_active(&asoc->wait))
-			wake_up_interruptible(&asoc->wait);
+	if (sctp_wspace(asoc) <= 0)
+		return;
+
+	if (waitqueue_active(&asoc->wait))
+		wake_up_interruptible(&asoc->wait);
 
	if (sctp_writeable(sk)) {
-		wait_queue_head_t *wq = sk_sleep(sk);
+		struct socket_wq *wq;
 
-		if (wq && waitqueue_active(wq))
-			wake_up_interruptible(wq);
+		rcu_read_lock();
+		wq = rcu_dereference(sk->sk_wq);
+		if (wq) {
+			if (waitqueue_active(&wq->wait))
+				wake_up_interruptible(&wq->wait);
 
			/* Note that we try to include the Async I/O support
			 * here by modeling from the current TCP/UDP code.
			 * We have not tested with it yet.
			 */
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
-				sock_wake_async(sock,
-						SOCK_WAKE_SPACE, POLL_OUT);
+				sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		}
+		rcu_read_unlock();
	}
 }
 
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = {
 
 #if IS_ENABLED(CONFIG_IPV6)
 
+#include <net/transp_v6.h>
+static void sctp_v6_destroy_sock(struct sock *sk)
+{
+	sctp_destroy_sock(sk);
+	inet6_destroy_sock(sk);
+}
+
 struct proto sctpv6_prot = {
	.name		= "SCTPv6",
	.owner		= THIS_MODULE,
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = {
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
-	.destroy	= sctp_destroy_sock,
+	.destroy	= sctp_v6_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
diff --git a/net/socket.c b/net/socket.c
index dd2c247c99e3..456fadb3d819 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
	return 0;
 }
 
-/* This function may be called only under socket lock or callback_lock or rcu_lock */
+/* This function may be called only under rcu_lock */
 
-int sock_wake_async(struct socket *sock, int how, int band)
+int sock_wake_async(struct socket_wq *wq, int how, int band)
 {
-	struct socket_wq *wq;
-
-	if (!sock)
-		return -1;
-	rcu_read_lock();
-	wq = rcu_dereference(sock->wq);
-	if (!wq || !wq->fasync_list) {
-		rcu_read_unlock();
+	if (!wq || !wq->fasync_list)
		return -1;
-	}
+
	switch (how) {
	case SOCK_WAKE_WAITD:
-		if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+		if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
			break;
		goto call_kill;
	case SOCK_WAKE_SPACE:
-		if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
+		if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
			break;
		/* fall through */
	case SOCK_WAKE_IO:
@@ -1086,7 +1079,7 @@ call_kill:
	case SOCK_WAKE_URG:
		kill_fasync(&wq->fasync_list, SIGURG, band);
	}
-	rcu_read_unlock();
+
	return 0;
 }
 EXPORT_SYMBOL(sock_wake_async);
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 229956bf8457..95f82d8d4888 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -353,12 +353,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;
+	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
 
	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);
 
+	if (copied <= rq_rcv_buf->head[0].iov_len) {
+		rq_rcv_buf->head[0].iov_len = copied;
+		rq_rcv_buf->page_len = 0;
+	} else {
+		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
+	}
+
	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index bc5b7b5032ca..7fccf9675df8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+	rqstp->rq_arg.len = req->rq_private_buf.len;
 
	/* reset result send buffer "put" position */
	resv->iov_len = 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1d1a70498910..2ffaf6a79499 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
	if (unlikely(!sock))
		return -ENOTSOCK;
 
-	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+	clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
	if (base != 0) {
		addr = NULL;
		addrlen = 0;
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task)
	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
 
	transport->inet->sk_write_pending--;
-	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+	clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
 }
 
 /**
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task)
 
	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
-		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
+		if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
			/*
			 * Notify TCP that we're limited by the application
			 * window size
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task)
			xprt_wait_for_buffer_space(task, xs_nospace_callback);
		}
	} else {
-		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+		clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
		ret = -ENOTCONN;
	}
 
@@ -626,7 +626,7 @@ process_status:
	case -EPERM:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
-		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+		clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
	}
 
	return status;
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
	case -EADDRINUSE:
	case -ENOBUFS:
	case -EPIPE:
-		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+		clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
	}
 
	return status;
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk)
 
	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
-	if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
+	if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
		return;
 
	xprt_write_space(xprt);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 9efbdbde2b08..91aea071ab27 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 
	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
+	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
 }
 
@@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
+		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 552dbaba9cf3..b53246fb0412 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -105,6 +105,7 @@ struct tipc_sock {
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
+static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
 	sk->sk_data_ready = tipc_data_ready;
 	sk->sk_write_space = tipc_write_space;
+	sk->sk_destruct = tipc_sock_destruct;
 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
 	tsk->sent_unacked = 0;
 	atomic_set(&tsk->dupl_rcvcnt, 0);
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock)
 		tipc_node_remove_conn(net, dnode, tsk->portid);
 	}
 
-	/* Discard any remaining (connection-based) messages in receive queue */
-	__skb_queue_purge(&sk->sk_receive_queue);
-
 	/* Reject any messages that accumulated in backlog queue */
 	sock->state = SS_DISCONNECTING;
 	release_sock(sk);
@@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk)
 	rcu_read_unlock();
 }
 
+static void tipc_sock_destruct(struct sock *sk)
+{
+	__skb_queue_purge(&sk->sk_receive_queue);
+}
+
 /**
  * filter_connect - Handle all incoming messages for a connection-based socket
  * @tsk: TIPC socket
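
Moving the receive-queue purge out of tipc_release() and into a ->sk_destruct callback means datagrams that land in the queue after release has run are still freed when the last reference to the sock is dropped. A minimal sketch of the pattern, with example_* as placeholder names:

    #include <net/sock.h>

    static void example_sock_destruct(struct sock *sk)
    {
    	/* Called from sk_free() once sk_refcnt hits zero, so no new
    	 * skbs can be queued concurrently at this point.
    	 */
    	__skb_queue_purge(&sk->sk_receive_queue);
    }

    static void example_sock_init(struct sock *sk)
    {
    	sk->sk_destruct = example_sock_destruct;
    }
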
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ad2719ad4c1b..70c03271b798 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
 	struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
 	struct rtable *rt;
 
-	if (skb_headroom(skb) < UDP_MIN_HEADROOM)
-		pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+	if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+		err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+		if (err)
+			goto tx_error;
+	}
 
 	skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
 	ub = rcu_dereference_rtnl(b->media_ptr);
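
pskb_expand_head() allocates memory and can fail; the old code ignored the return value and would then build headers in headroom that was never reserved. The fix follows the usual encapsulation idiom, sketched below with placeholder names (EXAMPLE_MIN_HEADROOM, example_xmit):

    #include <linux/ip.h>
    #include <linux/skbuff.h>
    #include <linux/udp.h>

    #define EXAMPLE_MIN_HEADROOM (sizeof(struct udphdr) + sizeof(struct iphdr))

    static int example_xmit(struct sk_buff *skb)
    {
    	if (skb_headroom(skb) < EXAMPLE_MIN_HEADROOM) {
    		int err = pskb_expand_head(skb, EXAMPLE_MIN_HEADROOM, 0,
    					   GFP_ATOMIC);

    		if (err) {
    			kfree_skb(skb);	/* error path owns the skb */
    			return err;
    		}
    	}
    	/* ... now safe to skb_push() the encapsulation headers ... */
    	return 0;
    }
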
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aaa0b58d6aba..45aebd966978 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,6 +326,118 @@ found:
 	return s;
 }
 
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (e.g., /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram which needs to be propagated to sleeping would-be writers
+ * since these might not have sent anything so far. This can't be
+ * accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue.
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hits the flow control condition and is broken when
+ * the association to the server socket is dissolved or after a wake
+ * up was relayed.
+ */
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+				      void *key)
+{
+	struct unix_sock *u;
+	wait_queue_head_t *u_sleep;
+
+	u = container_of(q, struct unix_sock, peer_wake);
+
+	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+			    q);
+	u->peer_wake.private = NULL;
+
+	/* relaying can only happen while the wq still exists */
+	u_sleep = sk_sleep(&u->sk);
+	if (u_sleep)
+		wake_up_interruptible_poll(u_sleep, key);
+
+	return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+	struct unix_sock *u, *u_other;
+	int rc;
+
+	u = unix_sk(sk);
+	u_other = unix_sk(other);
+	rc = 0;
+	spin_lock(&u_other->peer_wait.lock);
+
+	if (!u->peer_wake.private) {
+		u->peer_wake.private = other;
+		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+		rc = 1;
+	}
+
+	spin_unlock(&u_other->peer_wait.lock);
+	return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+					    struct sock *other)
+{
+	struct unix_sock *u, *u_other;
+
+	u = unix_sk(sk);
+	u_other = unix_sk(other);
+	spin_lock(&u_other->peer_wait.lock);
+
+	if (u->peer_wake.private == other) {
+		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+		u->peer_wake.private = NULL;
+	}
+
+	spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+						   struct sock *other)
+{
+	unix_dgram_peer_wake_disconnect(sk, other);
+	wake_up_interruptible_poll(sk_sleep(sk),
+				   POLLOUT |
+				   POLLWRNORM |
+				   POLLWRBAND);
+}
+
+/* preconditions:
+ *	- unix_peer(sk) == other
+ *	- association is stable
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+	int connected;
+
+	connected = unix_dgram_peer_wake_connect(sk, other);
+
+	if (unix_recvq_full(other))
+		return 1;
+
+	if (connected)
+		unix_dgram_peer_wake_disconnect(sk, other);
+
+	return 0;
+}
+
 static int unix_writable(const struct sock *sk)
 {
 	return sk->sk_state != TCP_LISTEN &&
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
 			skpair->sk_state_change(skpair);
 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 		}
+
+		unix_dgram_peer_wake_disconnect(sk, skpair);
 		sock_put(skpair); /* It may now die */
 		unix_peer(sk) = NULL;
 	}
@@ -441,6 +555,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 		if (state == TCP_LISTEN)
 			unix_release_sock(skb->sk, 1);
 		/* passed fds are erased in the kfree_skb hook */
+		UNIXCB(skb).consumed = skb->len;
 		kfree_skb(skb);
 	}
 
@@ -665,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 	INIT_LIST_HEAD(&u->link);
 	mutex_init(&u->readlock); /* single task reading lock */
 	init_waitqueue_head(&u->peer_wait);
+	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 	unix_insert_socket(unix_sockets_unbound(sk), sk);
 out:
 	if (sk == NULL)
@@ -1032,6 +1148,8 @@ restart:
 	if (unix_peer(sk)) {
 		struct sock *old_peer = unix_peer(sk);
 		unix_peer(sk) = other;
+		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
 		unix_state_double_unlock(sk, other);
 
 		if (other != old_peer)
@@ -1433,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
 	return err;
 }
 
+static bool unix_passcred_enabled(const struct socket *sock,
+				  const struct sock *other)
+{
+	return test_bit(SOCK_PASSCRED, &sock->flags) ||
+	       !other->sk_socket ||
+	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
+}
+
 /*
  * Some apps rely on write() giving SCM_CREDENTIALS
  * We include credentials if source or destination socket
@@ -1443,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 {
 	if (UNIXCB(skb).pid)
 		return;
-	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-	    !other->sk_socket ||
-	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+	if (unix_passcred_enabled(sock, other)) {
 		UNIXCB(skb).pid = get_pid(task_tgid(current));
 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
 	}
 }
 
+static int maybe_init_creds(struct scm_cookie *scm,
+			    struct socket *socket,
+			    const struct sock *other)
+{
+	int err;
+	struct msghdr msg = { .msg_controllen = 0 };
+
+	err = scm_send(socket, &msg, scm, false);
+	if (err)
+		return err;
+
+	if (unix_passcred_enabled(socket, other)) {
+		scm->pid = get_pid(task_tgid(current));
+		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
+	}
+	return err;
+}
+
+static bool unix_skb_scm_eq(struct sk_buff *skb,
+			    struct scm_cookie *scm)
+{
+	const struct unix_skb_parms *u = &UNIXCB(skb);
+
+	return u->pid == scm->pid &&
+	       uid_eq(u->uid, scm->creds.uid) &&
+	       gid_eq(u->gid, scm->creds.gid) &&
+	       unix_secdata_eq(scm, skb);
+}
+
 /*
  *	Send AF_UNIX data.
  */
@@ -1471,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
 	struct scm_cookie scm;
 	int max_level;
 	int data_len = 0;
+	int sk_locked;
 
 	wait_for_unix_gc();
 	err = scm_send(sock, msg, &scm, false);
@@ -1549,12 +1703,14 @@ restart:
 		goto out_free;
 	}
 
+	sk_locked = 0;
 	unix_state_lock(other);
+restart_locked:
 	err = -EPERM;
 	if (!unix_may_send(sk, other))
 		goto out_unlock;
 
-	if (sock_flag(other, SOCK_DEAD)) {
+	if (unlikely(sock_flag(other, SOCK_DEAD))) {
 		/*
 		 *	Check with 1003.1g - what should
 		 *	datagram error
@@ -1562,10 +1718,14 @@ restart:
 		unix_state_unlock(other);
 		sock_put(other);
 
+		if (!sk_locked)
+			unix_state_lock(sk);
+
 		err = 0;
-		unix_state_lock(sk);
 		if (unix_peer(sk) == other) {
 			unix_peer(sk) = NULL;
+			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
 			unix_state_unlock(sk);
 
 			unix_dgram_disconnected(sk, other);
@@ -1591,21 +1751,38 @@ restart:
 		goto out_unlock;
 	}
 
-	if (unix_peer(other) != sk && unix_recvq_full(other)) {
-		if (!timeo) {
-			err = -EAGAIN;
-			goto out_unlock;
+	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+		if (timeo) {
+			timeo = unix_wait_for_peer(other, timeo);
+
+			err = sock_intr_errno(timeo);
+			if (signal_pending(current))
+				goto out_free;
+
+			goto restart;
 		}
 
-		timeo = unix_wait_for_peer(other, timeo);
+		if (!sk_locked) {
+			unix_state_unlock(other);
+			unix_state_double_lock(sk, other);
+		}
 
-		err = sock_intr_errno(timeo);
-		if (signal_pending(current))
-			goto out_free;
+		if (unix_peer(sk) != other ||
+		    unix_dgram_peer_wake_me(sk, other)) {
+			err = -EAGAIN;
+			sk_locked = 1;
+			goto out_unlock;
+		}
 
-		goto restart;
+		if (!sk_locked) {
+			sk_locked = 1;
+			goto restart_locked;
+		}
 	}
 
+	if (unlikely(sk_locked))
+		unix_state_unlock(sk);
+
 	if (sock_flag(other, SOCK_RCVTSTAMP))
 		__net_timestamp(skb);
 	maybe_add_creds(skb, sock, other);
@@ -1619,6 +1796,8 @@ restart:
 	return len;
 
 out_unlock:
+	if (sk_locked)
+		unix_state_unlock(sk);
 	unix_state_unlock(other);
out_free:
 	kfree_skb(skb);
@@ -1740,8 +1919,10 @@ out_err:
 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 				    int offset, size_t size, int flags)
 {
-	int err = 0;
-	bool send_sigpipe = true;
+	int err;
+	bool send_sigpipe = false;
+	bool init_scm = true;
+	struct scm_cookie scm;
 	struct sock *other, *sk = socket->sk;
 	struct sk_buff *skb, *newskb = NULL, *tail = NULL;
 
@@ -1759,7 +1940,7 @@ alloc_skb:
 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
 					      &err, 0);
 		if (!newskb)
-			return err;
+			goto err;
 	}
 
 	/* we must acquire readlock as we modify already present
@@ -1768,12 +1949,12 @@ alloc_skb:
 	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
 	if (err) {
 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
-		send_sigpipe = false;
 		goto err;
 	}
 
 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
 		err = -EPIPE;
+		send_sigpipe = true;
 		goto err_unlock;
 	}
 
@@ -1782,23 +1963,34 @@ alloc_skb:
 	if (sock_flag(other, SOCK_DEAD) ||
 	    other->sk_shutdown & RCV_SHUTDOWN) {
 		err = -EPIPE;
+		send_sigpipe = true;
 		goto err_state_unlock;
 	}
 
+	if (init_scm) {
+		err = maybe_init_creds(&scm, socket, other);
+		if (err)
+			goto err_state_unlock;
+		init_scm = false;
+	}
+
 	skb = skb_peek_tail(&other->sk_receive_queue);
 	if (tail && tail == skb) {
 		skb = newskb;
-	} else if (!skb) {
-		if (newskb)
+	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
+		if (newskb) {
 			skb = newskb;
-		else
+		} else {
+			tail = skb;
 			goto alloc_skb;
+		}
 	} else if (newskb) {
 		/* this is fast path, we don't necessarily need to
 		 * call to kfree_skb even though with newskb == NULL
 		 * this - does no harm
 		 */
 		consume_skb(newskb);
+		newskb = NULL;
 	}
 
 	if (skb_append_pagefrags(skb, page, offset, size)) {
@@ -1811,14 +2003,20 @@ alloc_skb:
 	skb->truesize += size;
 	atomic_add(size, &sk->sk_wmem_alloc);
 
-	if (newskb)
+	if (newskb) {
+		err = unix_scm_to_skb(&scm, skb, false);
+		if (err)
+			goto err_state_unlock;
+		spin_lock(&other->sk_receive_queue.lock);
 		__skb_queue_tail(&other->sk_receive_queue, newskb);
+		spin_unlock(&other->sk_receive_queue.lock);
+	}
 
 	unix_state_unlock(other);
 	mutex_unlock(&unix_sk(other)->readlock);
 
 	other->sk_data_ready(other);
-
+	scm_destroy(&scm);
 	return size;
 
@@ -1829,6 +2027,8 @@ err:
 	kfree_skb(newskb);
 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
 		send_sig(SIGPIPE, current, 0);
+	if (!init_scm)
+		scm_destroy(&scm);
 	return err;
 }
 
@@ -1991,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 		    !timeo)
 			break;
 
-		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 		unix_state_unlock(sk);
 		timeo = freezable_schedule_timeout(timeo);
 		unix_state_lock(sk);
@@ -1999,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 		if (sock_flag(sk, SOCK_DEAD))
 			break;
 
-		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 	}
 
 	finish_wait(sk_sleep(sk), &wait);
@@ -2072,6 +2272,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
 
 	do {
 		int chunk;
+		bool drop_skb;
 		struct sk_buff *skb, *last;
 
 		unix_state_lock(sk);
@@ -2131,10 +2332,7 @@ unlock:
 
 		if (check_creds) {
 			/* Never glue messages from different writers */
-			if ((UNIXCB(skb).pid != scm.pid) ||
-			    !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
-			    !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
-			    !unix_secdata_eq(&scm, skb))
+			if (!unix_skb_scm_eq(skb, &scm))
 				break;
 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
 			/* Copy credentials */
@@ -2152,7 +2350,11 @@ unlock:
 		}
 
 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+		skb_get(skb);
 		chunk = state->recv_actor(skb, skip, chunk, state);
+		drop_skb = !unix_skb_len(skb);
+		/* skb is only safe to use if !drop_skb */
+		consume_skb(skb);
 		if (chunk < 0) {
 			if (copied == 0)
 				copied = -EFAULT;
@@ -2161,6 +2363,18 @@ unlock:
 		copied += chunk;
 		size -= chunk;
 
+		if (drop_skb) {
+			/* the skb was touched by a concurrent reader;
+			 * we should not expect anything from this skb
+			 * anymore and assume it invalid - we can be
+			 * sure it was dropped from the socket queue
+			 *
+			 * let's report a short read
+			 */
+			err = 0;
+			break;
+		}
+
 		/* Mark read part of skb as used */
 		if (!(flags & MSG_PEEK)) {
 			UNIXCB(skb).consumed += chunk;
@@ -2454,20 +2668,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 		return mask;
 
 	writable = unix_writable(sk);
-	other = unix_peer_get(sk);
-	if (other) {
-		if (unix_peer(other) != sk) {
-			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
-			if (unix_recvq_full(other))
-				writable = 0;
-		}
-		sock_put(other);
+	if (writable) {
+		unix_state_lock(sk);
+
+		other = unix_peer(sk);
+		if (other && unix_peer(other) != sk &&
+		    unix_recvq_full(other) &&
+		    unix_dgram_peer_wake_me(sk, other))
+			writable = 0;
+
+		unix_state_unlock(sk);
 	}
 
 	if (writable)
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
-		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	return mask;
 }
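
The effect of the peer_wait relay is observable from userspace: a datagram client connected one-way to a bound server must see POLLOUT again once a reader drains the server's full queue. A hedged userspace reproduction, with an arbitrary path and buffer size; in a real test the poll() would sleep in one thread while another drains srv, and the relay is what turns that drain into a wakeup:

    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    int main(void)
    {
    	struct sockaddr_un addr = { .sun_family = AF_UNIX };
    	struct pollfd pfd;
    	char buf[64] = "x";
    	int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
    	int cli = socket(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0);

    	strcpy(addr.sun_path, "/tmp/dgram-wake-demo");
    	unlink(addr.sun_path);
    	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
    	connect(cli, (struct sockaddr *)&addr, sizeof(addr)); /* one-way */

    	while (send(cli, buf, sizeof(buf), 0) > 0)
    		;	/* fails with EAGAIN once srv's queue is full */

    	recv(srv, buf, sizeof(buf), 0);	/* drain one datagram */

    	pfd.fd = cli;
    	pfd.events = POLLOUT;
    	poll(&pfd, 1, 1000);	/* must now report writability */
    	printf("revents=%#x\n", pfd.revents);
    	return 0;
    }
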
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 79b4596b5f9a..edd638b5825f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -67,10 +67,13 @@ HOSTLOADLIBES_lathist += -lelf
 # point this to your LLVM backend with bpf support
 LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
 
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But there is no easy way to fix it, so just exclude it since it is
+# useless for BPF samples.
 $(obj)/%.o: $(src)/%.c
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 125b906cd1d4..638a38e1b419 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2711,7 +2711,7 @@ $kernelversion = get_kernel_version();
 
 # generate a sequence of code that will splice in highlighting information
 # using the s// operator.
-foreach my $k (keys @highlights) {
+for (my $k = 0; $k < @highlights; $k++) {
     my $pattern = $highlights[$k][0];
     my $result = $highlights[$k][1];
 #   print STDERR "scanning pattern:$pattern, highlight:($result)\n";
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 927db9f35ad6..696ccfa08d10 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
 	size_t datalen = prep->datalen;
 	int ret = 0;
 
+	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+		return -ENOKEY;
 	if (datalen <= 0 || datalen > 32767 || !prep->data)
 		return -EINVAL;
 
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 903dace648a1..16dec53184b6 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1007,13 +1007,16 @@ static void trusted_rcu_free(struct rcu_head *rcu)
  */
 static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
 {
-	struct trusted_key_payload *p = key->payload.data[0];
+	struct trusted_key_payload *p;
 	struct trusted_key_payload *new_p;
 	struct trusted_key_options *new_o;
 	size_t datalen = prep->datalen;
 	char *datablob;
 	int ret = 0;
 
+	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+		return -ENOKEY;
+	p = key->payload.data[0];
 	if (!p->migratable)
 		return -EPERM;
 	if (datalen <= 0 || datalen > 32767 || !prep->data)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 28cb30f80256..8705d79b2c6f 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
 
 	if (ret == 0) {
 		/* attach the new data, displacing the old */
-		zap = key->payload.data[0];
+		if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+			zap = key->payload.data[0];
+		else
+			zap = NULL;
 		rcu_assign_keypointer(key, upayload);
 		key->expiry = 0;
 	}
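
All three key-type fixes share one shape: a negatively instantiated key stores an error code, not a payload, so an update must fail with -ENOKEY (or skip the old-payload cleanup) before touching key->payload.data[0]. A sketch of that guard; the key type and payload struct here are hypothetical:

    #include <linux/key.h>
    #include <linux/key-type.h>

    struct example_payload {
    	unsigned short datalen;
    };

    static int example_update(struct key *key,
    			  struct key_preparsed_payload *prep)
    {
    	struct example_payload *old;

    	/* A negative key carries a rejection error, not a payload. */
    	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
    		return -ENOKEY;

    	old = key->payload.data[0];	/* safe only after the check */
    	/* ... build the replacement payload, then displace 'old' ... */
    	(void)old;
    	return 0;
    }
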
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 18643bf9894d..456e1a9bcfde 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -638,7 +638,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
 {
 	struct avtab_node *node;
 
-	if (!ctab || !key || !avd || !xperms)
+	if (!ctab || !key || !avd)
 		return;
 
 	for (node = avtab_search_node(ctab, key); node;
@@ -657,7 +657,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
 		if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
 			avd->auditallow |= node->datum.u.data;
-		if ((node->key.specified & AVTAB_ENABLED) &&
+		if (xperms && (node->key.specified & AVTAB_ENABLED) &&
 		    (node->key.specified & AVTAB_XPERMS))
 			services_compute_xperms_drivers(xperms, node);
 	}
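
cond_compute_av() is also reached from paths that want only the access-vector decision and pass xperms as NULL; gating the whole function on xperms made those callers skip conditional rules entirely. The reordering follows a general rule: validate an optional output at its point of use, not at the function gate. A reduced sketch with made-up types:

    #include <stddef.h>

    struct example_avd { unsigned int allowed; };
    struct example_xperms { unsigned int drivers; };

    static void example_compute(struct example_avd *avd,
    			    struct example_xperms *xperms)
    {
    	if (!avd)
    		return;		/* mandatory output */

    	avd->allowed |= 0x1;	/* always computed */

    	if (xperms)		/* optional output, checked where used */
    		xperms->drivers |= 0x1;
    }
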
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 5d99436dfcae..0cda05c72f50 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -12,9 +12,11 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL v2");
 
 #define OUI_WEISS		0x001c6a
+#define OUI_LOUD		0x000ff2
 
 #define DICE_CATEGORY_ID	0x04
 #define WEISS_CATEGORY_ID	0x00
+#define LOUD_CATEGORY_ID	0x10
 
 static int dice_interface_check(struct fw_unit *unit)
 {
@@ -57,6 +59,8 @@ static int dice_interface_check(struct fw_unit *unit)
 	}
 	if (vendor == OUI_WEISS)
 		category = WEISS_CATEGORY_ID;
+	else if (vendor == OUI_LOUD)
+		category = LOUD_CATEGORY_ID;
 	else
 		category = DICE_CATEGORY_ID;
 	if (device->config_rom[3] != ((vendor << 8) | category) ||
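
dice_interface_check() compares config-rom word 3 against the vendor OUI shifted over a one-byte category, so supporting LOUD units means supplying both their OUI and category. As a worked value from the constants above: (0x000ff2 << 8) | 0x10 == 0xff210. A standalone recomputation:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int vendor = 0x000ff2;	/* OUI_LOUD */
    	unsigned int category = 0x10;	/* LOUD_CATEGORY_ID */

    	/* the value config_rom[3] is expected to hold */
    	printf("%#x\n", (vendor << 8) | category);	/* 0xff210 */
    	return 0;
    }
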
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8a7fbdcb4072..963f82430938 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -312,6 +312,10 @@ enum {
 	(AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
 	 AZX_DCAPS_I915_POWERWELL)
 
+#define AZX_DCAPS_INTEL_BROXTON \
+	(AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+	 AZX_DCAPS_I915_POWERWELL)
+
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
 	(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2124,6 +2128,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Sunrise Point-LP */
 	{ PCI_DEVICE(0x8086, 0x9d70),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Broxton-P(Apollolake) */
+	{ PCI_DEVICE(0x8086, 0x5a98),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
 	/* Haswell */
 	{ PCI_DEVICE(0x8086, 0x0a0c),
 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c8b8ef5246a6..ef198903c0c3 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -955,6 +955,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
  */
 
 static const struct hda_device_id snd_hda_id_conexant[] = {
+	HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
@@ -972,9 +973,9 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
 	HDA_CODEC_ENTRY(0x14f150ac, "CX20652", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150b8, "CX20664", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150b9, "CX20665", patch_conexant_auto),
-	HDA_CODEC_ENTRY(0x14f150f1, "CX20721", patch_conexant_auto),
+	HDA_CODEC_ENTRY(0x14f150f1, "CX21722", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150f2, "CX20722", patch_conexant_auto),
-	HDA_CODEC_ENTRY(0x14f150f3, "CX20723", patch_conexant_auto),
+	HDA_CODEC_ENTRY(0x14f150f3, "CX21724", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150f4, "CX20724", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f1510f, "CX20751/2", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15110, "CX20751/2", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 60cd9e700909..4b6fb668c91c 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2352,6 +2352,12 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
 	struct hda_codec *codec = audio_ptr;
 	int pin_nid = port + 0x04;
 
+	/* skip notification during system suspend (but not in runtime PM);
+	 * the state will be updated at resume
+	 */
+	if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
+		return;
+
 	check_presence_and_report(codec, pin_nid);
 }
 
@@ -2378,7 +2384,8 @@ static int patch_generic_hdmi(struct hda_codec *codec)
 	 * can cover the codec power request, and so need not set this flag.
 	 * For previous platforms, there is no such power well feature.
 	 */
-	if (is_valleyview_plus(codec) || is_skylake(codec))
+	if (is_valleyview_plus(codec) || is_skylake(codec) ||
+	    is_broxton(codec))
 		codec->core.link_power_control = 1;
 
 	if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f7b065f9ac4..9bedf7c85e29 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1759,6 +1759,7 @@ enum {
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
 	ALC887_FIXUP_BASS_CHMAP,
+	ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1920,6 +1921,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
 				 const struct hda_fixup *fix, int action);
+static void alc_fixup_disable_aamix(struct hda_codec *codec,
+				    const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2151,6 +2154,10 @@ static const struct hda_fixup alc882_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_bass_chmap,
 	},
+	[ALC882_FIXUP_DISABLE_AAMIX] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_aamix,
+	},
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2218,6 +2225,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -4587,6 +4595,7 @@ enum {
 	ALC292_FIXUP_DISABLE_AAMIX,
 	ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC275_FIXUP_DELL_XPS,
+	ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5167,6 +5176,17 @@ static const struct hda_fixup alc269_fixups[] = {
 			{}
 		}
 	},
+	[ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			/* Disable pass-through path for FRONT 14h */
+			{0x20, AC_VERB_SET_COEF_INDEX, 0x36},
+			{0x20, AC_VERB_SET_PROC_COEF, 0x1737},
+			{}
+		},
+		.chained = true,
+		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5180,8 +5200,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
 	SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
@@ -5204,6 +5226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 826122d8acee..2c7c5eb8b1e9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -3110,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec,
 	spec->gpio_led = 0x08;
 }
 
+static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin)
+{
+	unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+	/* count line-out, too, as BIOS sets often so */
+	return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE &&
+	       (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
+		get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT);
+}
+
+static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin)
+{
+	unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+	/* It was changed in the BIOS to just satisfy MS DTM.
+	 * Let's turn it back into slaved HP
+	 */
+	pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) |
+		  (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT);
+	pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) |
+		  0x1f;
+	snd_hda_codec_set_pincfg(codec, pin, pin_cfg);
+}
 
 static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
 				   const struct hda_fixup *fix, int action)
@@ -3119,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
 	if (action != HDA_FIXUP_ACT_PRE_PROBE)
 		return;
 
-	if (hp_blike_system(codec->core.subsystem_id)) {
-		unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
-		if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
-		    get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER ||
-		    get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) {
-			/* It was changed in the BIOS to just satisfy MS DTM.
-			 * Lets turn it back into slaved HP
-			 */
-			pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE))
-				| (AC_JACK_HP_OUT <<
-				   AC_DEFCFG_DEVICE_SHIFT);
-			pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC
-				| AC_DEFCFG_SEQUENCE)))
-				| 0x1f;
-			snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg);
-		}
+	/* when both output A and F are assigned, these are supposedly
+	 * dock and built-in headphones; fix both pin configs
+	 */
+	if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) {
+		fixup_hp_headphone(codec, 0x0a);
+		fixup_hp_headphone(codec, 0x0f);
 	}
 
 	if (find_mute_led_cfg(codec, 1))
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 9929efc6b9aa..b3ea24d64c50 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1023,24 +1023,18 @@ void arizona_init_dvfs(struct arizona_priv *priv)
 }
 EXPORT_SYMBOL_GPL(arizona_init_dvfs);
 
-static unsigned int arizona_sysclk_48k_rates[] = {
+static unsigned int arizona_opclk_ref_48k_rates[] = {
 	6144000,
 	12288000,
 	24576000,
 	49152000,
-	73728000,
-	98304000,
-	147456000,
 };
 
-static unsigned int arizona_sysclk_44k1_rates[] = {
+static unsigned int arizona_opclk_ref_44k1_rates[] = {
 	5644800,
 	11289600,
 	22579200,
 	45158400,
-	67737600,
-	90316800,
-	135475200,
 };
 
 static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
@@ -1065,11 +1059,11 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
 	}
 
 	if (refclk % 8000)
-		rates = arizona_sysclk_44k1_rates;
+		rates = arizona_opclk_ref_44k1_rates;
 	else
-		rates = arizona_sysclk_48k_rates;
+		rates = arizona_opclk_ref_48k_rates;
 
-	for (ref = 0; ref < ARRAY_SIZE(arizona_sysclk_48k_rates) &&
+	for (ref = 0; ref < ARRAY_SIZE(arizona_opclk_ref_48k_rates) &&
 	     rates[ref] <= refclk; ref++) {
 		div = 1;
 		while (rates[ref] / div >= freq && div < 32) {
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 969e337dc17c..84f5eb07a91b 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -205,18 +205,18 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
 
 /* Left Mixer */
 static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0),
-	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0),
-	SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0),
-	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0),
+	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+	SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
 };
 
 /* Right Mixer */
 static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
-	SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0),
-	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0),
-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0),
-	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
 };
 
 static const char * const es8328_pga_sel[] = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 7fc7b4e3f444..c1b87c5800b1 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1271,6 +1271,36 @@ static int nau8825_i2c_remove(struct i2c_client *client)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int nau8825_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+	disable_irq(client->irq);
+	regcache_cache_only(nau8825->regmap, true);
+	regcache_mark_dirty(nau8825->regmap);
+
+	return 0;
+}
+
+static int nau8825_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+	regcache_cache_only(nau8825->regmap, false);
+	regcache_sync(nau8825->regmap);
+	enable_irq(client->irq);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops nau8825_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
+};
+
 static const struct i2c_device_id nau8825_i2c_ids[] = {
 	{ "nau8825", 0 },
 	{ }
@@ -1297,6 +1327,7 @@ static struct i2c_driver nau8825_driver = {
 		.name = "nau8825",
 		.of_match_table = of_match_ptr(nau8825_of_ids),
 		.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
+		.pm = &nau8825_pm,
 	},
 	.probe = nau8825_i2c_probe,
 	.remove = nau8825_i2c_remove,
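
The suspend/resume pair above is the stock regmap cache idiom: before the part may lose power, switch the regmap to cache-only and mark every register dirty; on resume, leave cache-only mode and let regcache_sync() replay the dirty values. A generic sketch of the same idiom, with example_priv as a placeholder driver struct:

    #include <linux/device.h>
    #include <linux/regmap.h>

    struct example_priv {
    	struct regmap *regmap;
    };

    static int example_suspend(struct device *dev)
    {
    	struct example_priv *priv = dev_get_drvdata(dev);

    	regcache_cache_only(priv->regmap, true);	/* writes go to cache */
    	regcache_mark_dirty(priv->regmap);		/* force a full resync */
    	return 0;
    }

    static int example_resume(struct device *dev)
    {
    	struct example_priv *priv = dev_get_drvdata(dev);

    	regcache_cache_only(priv->regmap, false);
    	return regcache_sync(priv->regmap);		/* replay dirty regs */
    }
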
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index aca479fa7670..1dc68ab08a17 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -80,8 +80,10 @@ int rl6231_calc_dmic_clk(int rate)
 	}
 
 	for (i = 0; i < ARRAY_SIZE(div); i++) {
-		/* find divider that gives DMIC frequency below 3MHz */
-		if (3000000 * div[i] >= rate)
+		if ((div[i] % 3) == 0)
+			continue;
+		/* find divider that gives DMIC frequency below 3.072MHz */
+		if (3072000 * div[i] >= rate)
 			return i;
 	}
 
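
A worked pass over the new search, assuming the divider table {2, 3, 4, 6, 8, 12} that rl6231.c uses elsewhere (an assumption, since the table sits outside this hunk): for a 24.576 MHz input, dividers divisible by 3 are skipped and the first remaining divider with 3072000 * div >= rate is 8, which lands exactly on 3.072 MHz:

    #include <stdio.h>

    int main(void)
    {
    	static const int div[] = {2, 3, 4, 6, 8, 12};	/* assumed table */
    	int rate = 24576000;	/* 24.576 MHz source clock */
    	unsigned int i;

    	for (i = 0; i < sizeof(div) / sizeof(div[0]); i++) {
    		if ((div[i] % 3) == 0)
    			continue;	/* skip 3, 6, 12 */
    		if (3072000 * div[i] >= rate) {
    			/* prints: div=8 -> 3072000 Hz */
    			printf("div=%d -> %d Hz\n", div[i], rate / div[i]);
    			return 0;
    		}
    	}
    	return 1;
    }
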
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 28132375e427..ef76940f9dcb 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -245,7 +245,7 @@ struct rt5645_priv {
 	struct snd_soc_jack *hp_jack;
 	struct snd_soc_jack *mic_jack;
 	struct snd_soc_jack *btn_jack;
-	struct delayed_work jack_detect_work;
+	struct delayed_work jack_detect_work, rcclock_work;
 	struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
 	struct rt5645_eq_param_s *eq_param;
 
@@ -565,12 +565,33 @@ static int rt5645_hweq_put(struct snd_kcontrol *kcontrol,
 	.put = rt5645_hweq_put \
 }
 
+static int rt5645_spk_put_volsw(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
+	int ret;
+
+	cancel_delayed_work_sync(&rt5645->rcclock_work);
+
+	regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+		RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PU);
+
+	ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+	queue_delayed_work(system_power_efficient_wq, &rt5645->rcclock_work,
+		msecs_to_jiffies(200));
+
+	return ret;
+}
+
 static const struct snd_kcontrol_new rt5645_snd_controls[] = {
 	/* Speaker Output Volume */
 	SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL,
 		RT5645_VOL_L_SFT, RT5645_VOL_R_SFT, 1, 1),
-	SOC_DOUBLE_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
-		RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, out_vol_tlv),
+	SOC_DOUBLE_EXT_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
+		RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, snd_soc_get_volsw,
+		rt5645_spk_put_volsw, out_vol_tlv),
 
 	/* ClassD modulator Speaker Gain Ratio */
 	SOC_SINGLE_TLV("Speaker ClassD Playback Volume", RT5645_SPO_CLSD_RATIO,
@@ -1498,7 +1519,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
 			regmap_write(rt5645->regmap, RT5645_PR_BASE +
 				RT5645_MAMP_INT_REG2, 0xfc00);
 			snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
-			msleep(40);
+			msleep(70);
 			rt5645->hp_on = true;
 		} else {
 			/* depop parameters */
@@ -3122,6 +3143,15 @@ static void rt5645_jack_detect_work(struct work_struct *work)
 				    SND_JACK_BTN_2 | SND_JACK_BTN_3);
 }
 
+static void rt5645_rcclock_work(struct work_struct *work)
+{
+	struct rt5645_priv *rt5645 =
+		container_of(work, struct rt5645_priv, rcclock_work.work);
+
+	regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+		RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PD);
+}
+
 static irqreturn_t rt5645_irq(int irq, void *data)
 {
 	struct rt5645_priv *rt5645 = data;
@@ -3348,6 +3378,27 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Reks"),
 		},
 	},
+	{
+		.ident = "Google Edgar",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+		},
+	},
+	{
+		.ident = "Google Wizpig",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Wizpig"),
+		},
+	},
+	{
+		.ident = "Google Terra",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Terra"),
+		},
+	},
 	{ }
 };
 
@@ -3587,6 +3638,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 	}
 
 	INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+	INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
 
 	if (rt5645->i2c->irq) {
 		ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
@@ -3621,6 +3673,7 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
 		free_irq(i2c->irq, rt5645);
 
 	cancel_delayed_work_sync(&rt5645->jack_detect_work);
+	cancel_delayed_work_sync(&rt5645->rcclock_work);
 
 	snd_soc_unregister_codec(&i2c->dev);
 	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
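
The rcclock changes are a burst keep-alive: every speaker volume write cancels the pending power-down, forces the RC clock on, then re-arms a 200 ms delayed work that gates the clock once writes stop arriving. The idiom, reduced to placeholder names:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct example_dev {
    	struct delayed_work off_work;
    };

    static void example_off_work(struct work_struct *work)
    {
    	/* nothing touched the device for 200 ms: gate the clock here */
    }

    static void example_init(struct example_dev *dev)
    {
    	INIT_DELAYED_WORK(&dev->off_work, example_off_work);
    }

    static void example_access(struct example_dev *dev)
    {
    	cancel_delayed_work_sync(&dev->off_work);	/* keep clock on */
    	/* ... ungate the clock and do the register write ... */
    	queue_delayed_work(system_power_efficient_wq, &dev->off_work,
    			   msecs_to_jiffies(200));
    }
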
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index dc2b46236c5c..3f1b0f1df809 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -973,12 +973,12 @@
973#define RT5670_SCLK_SRC_MCLK (0x0 << 14) 973#define RT5670_SCLK_SRC_MCLK (0x0 << 14)
974#define RT5670_SCLK_SRC_PLL1 (0x1 << 14) 974#define RT5670_SCLK_SRC_PLL1 (0x1 << 14)
975#define RT5670_SCLK_SRC_RCCLK (0x2 << 14) /* 15MHz */ 975#define RT5670_SCLK_SRC_RCCLK (0x2 << 14) /* 15MHz */
976#define RT5670_PLL1_SRC_MASK (0x3 << 12) 976#define RT5670_PLL1_SRC_MASK (0x7 << 11)
977#define RT5670_PLL1_SRC_SFT 12 977#define RT5670_PLL1_SRC_SFT 11
978#define RT5670_PLL1_SRC_MCLK (0x0 << 12) 978#define RT5670_PLL1_SRC_MCLK (0x0 << 11)
979#define RT5670_PLL1_SRC_BCLK1 (0x1 << 12) 979#define RT5670_PLL1_SRC_BCLK1 (0x1 << 11)
980#define RT5670_PLL1_SRC_BCLK2 (0x2 << 12) 980#define RT5670_PLL1_SRC_BCLK2 (0x2 << 11)
981#define RT5670_PLL1_SRC_BCLK3 (0x3 << 12) 981#define RT5670_PLL1_SRC_BCLK3 (0x3 << 11)
982#define RT5670_PLL1_PD_MASK (0x1 << 3) 982#define RT5670_PLL1_PD_MASK (0x1 << 3)
983#define RT5670_PLL1_PD_SFT 3 983#define RT5670_PLL1_PD_SFT 3
984#define RT5670_PLL1_PD_1 (0x0 << 3) 984#define RT5670_PLL1_PD_1 (0x0 << 3)
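The rt5670.h hunk reshapes PLL1_SRC from a 2-bit field at bit 12 to a 3-bit field at bit 11, which the new mask implies is the field's real location in hardware; with the old macros, the source values were programmed into the wrong bits. The invariant worth checking whenever mask and shift change together is that every value macro still lies inside the mask. A standalone sketch (field names are illustrative):

#include <assert.h>

#define PLL1_SRC_SFT    11
#define PLL1_SRC_MASK   (0x7 << PLL1_SRC_SFT)
#define PLL1_SRC_BCLK3  (0x3 << PLL1_SRC_SFT)

int main(void)
{
        /* a value macro that escapes its mask is programming bits the
         * update helper will never clear or set consistently */
        assert((PLL1_SRC_BCLK3 & ~PLL1_SRC_MASK) == 0);
        return 0;
}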
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index b4cd7e3bf5f8..69d987a9935c 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -1386,90 +1386,90 @@ static const struct snd_kcontrol_new rt5677_dac_r_mix[] = {
1386}; 1386};
1387 1387
1388static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = { 1388static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = {
1389 SOC_DAPM_SINGLE("ST L Switch", RT5677_STO1_DAC_MIXER, 1389 SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_STO1_DAC_MIXER,
1390 RT5677_M_ST_DAC1_L_SFT, 1, 1), 1390 RT5677_M_ST_DAC1_L_SFT, 1, 1),
1391 SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER, 1391 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
1392 RT5677_M_DAC1_L_STO_L_SFT, 1, 1), 1392 RT5677_M_DAC1_L_STO_L_SFT, 1, 1),
1393 SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER, 1393 SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
1394 RT5677_M_DAC2_L_STO_L_SFT, 1, 1), 1394 RT5677_M_DAC2_L_STO_L_SFT, 1, 1),
1395 SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER, 1395 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
1396 RT5677_M_DAC1_R_STO_L_SFT, 1, 1), 1396 RT5677_M_DAC1_R_STO_L_SFT, 1, 1),
1397}; 1397};
1398 1398
1399static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = { 1399static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = {
1400 SOC_DAPM_SINGLE("ST R Switch", RT5677_STO1_DAC_MIXER, 1400 SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_STO1_DAC_MIXER,
1401 RT5677_M_ST_DAC1_R_SFT, 1, 1), 1401 RT5677_M_ST_DAC1_R_SFT, 1, 1),
1402 SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER, 1402 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
1403 RT5677_M_DAC1_R_STO_R_SFT, 1, 1), 1403 RT5677_M_DAC1_R_STO_R_SFT, 1, 1),
1404 SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER, 1404 SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
1405 RT5677_M_DAC2_R_STO_R_SFT, 1, 1), 1405 RT5677_M_DAC2_R_STO_R_SFT, 1, 1),
1406 SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER, 1406 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
1407 RT5677_M_DAC1_L_STO_R_SFT, 1, 1), 1407 RT5677_M_DAC1_L_STO_R_SFT, 1, 1),
1408}; 1408};
1409 1409
1410static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = { 1410static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = {
1411 SOC_DAPM_SINGLE("ST L Switch", RT5677_MONO_DAC_MIXER, 1411 SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_MONO_DAC_MIXER,
1412 RT5677_M_ST_DAC2_L_SFT, 1, 1), 1412 RT5677_M_ST_DAC2_L_SFT, 1, 1),
1413 SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER, 1413 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
1414 RT5677_M_DAC1_L_MONO_L_SFT, 1, 1), 1414 RT5677_M_DAC1_L_MONO_L_SFT, 1, 1),
1415 SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER, 1415 SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
1416 RT5677_M_DAC2_L_MONO_L_SFT, 1, 1), 1416 RT5677_M_DAC2_L_MONO_L_SFT, 1, 1),
1417 SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER, 1417 SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
1418 RT5677_M_DAC2_R_MONO_L_SFT, 1, 1), 1418 RT5677_M_DAC2_R_MONO_L_SFT, 1, 1),
1419}; 1419};
1420 1420
1421static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = { 1421static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = {
1422 SOC_DAPM_SINGLE("ST R Switch", RT5677_MONO_DAC_MIXER, 1422 SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_MONO_DAC_MIXER,
1423 RT5677_M_ST_DAC2_R_SFT, 1, 1), 1423 RT5677_M_ST_DAC2_R_SFT, 1, 1),
1424 SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER, 1424 SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
1425 RT5677_M_DAC1_R_MONO_R_SFT, 1, 1), 1425 RT5677_M_DAC1_R_MONO_R_SFT, 1, 1),
1426 SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER, 1426 SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
1427 RT5677_M_DAC2_R_MONO_R_SFT, 1, 1), 1427 RT5677_M_DAC2_R_MONO_R_SFT, 1, 1),
1428 SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER, 1428 SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
1429 RT5677_M_DAC2_L_MONO_R_SFT, 1, 1), 1429 RT5677_M_DAC2_L_MONO_R_SFT, 1, 1),
1430}; 1430};
1431 1431
1432static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = { 1432static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = {
1433 SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER, 1433 SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
1434 RT5677_M_STO_L_DD1_L_SFT, 1, 1), 1434 RT5677_M_STO_L_DD1_L_SFT, 1, 1),
1435 SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER, 1435 SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
1436 RT5677_M_MONO_L_DD1_L_SFT, 1, 1), 1436 RT5677_M_MONO_L_DD1_L_SFT, 1, 1),
1437 SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER, 1437 SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
1438 RT5677_M_DAC3_L_DD1_L_SFT, 1, 1), 1438 RT5677_M_DAC3_L_DD1_L_SFT, 1, 1),
1439 SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER, 1439 SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
1440 RT5677_M_DAC3_R_DD1_L_SFT, 1, 1), 1440 RT5677_M_DAC3_R_DD1_L_SFT, 1, 1),
1441}; 1441};
1442 1442
1443static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = { 1443static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = {
1444 SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER, 1444 SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
1445 RT5677_M_STO_R_DD1_R_SFT, 1, 1), 1445 RT5677_M_STO_R_DD1_R_SFT, 1, 1),
1446 SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER, 1446 SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
1447 RT5677_M_MONO_R_DD1_R_SFT, 1, 1), 1447 RT5677_M_MONO_R_DD1_R_SFT, 1, 1),
1448 SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER, 1448 SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
1449 RT5677_M_DAC3_R_DD1_R_SFT, 1, 1), 1449 RT5677_M_DAC3_R_DD1_R_SFT, 1, 1),
1450 SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER, 1450 SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
1451 RT5677_M_DAC3_L_DD1_R_SFT, 1, 1), 1451 RT5677_M_DAC3_L_DD1_R_SFT, 1, 1),
1452}; 1452};
1453 1453
1454static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = { 1454static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = {
1455 SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER, 1455 SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
1456 RT5677_M_STO_L_DD2_L_SFT, 1, 1), 1456 RT5677_M_STO_L_DD2_L_SFT, 1, 1),
1457 SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER, 1457 SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
1458 RT5677_M_MONO_L_DD2_L_SFT, 1, 1), 1458 RT5677_M_MONO_L_DD2_L_SFT, 1, 1),
1459 SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER, 1459 SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
1460 RT5677_M_DAC4_L_DD2_L_SFT, 1, 1), 1460 RT5677_M_DAC4_L_DD2_L_SFT, 1, 1),
1461 SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER, 1461 SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
1462 RT5677_M_DAC4_R_DD2_L_SFT, 1, 1), 1462 RT5677_M_DAC4_R_DD2_L_SFT, 1, 1),
1463}; 1463};
1464 1464
1465static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = { 1465static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = {
1466 SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER, 1466 SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
1467 RT5677_M_STO_R_DD2_R_SFT, 1, 1), 1467 RT5677_M_STO_R_DD2_R_SFT, 1, 1),
1468 SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER, 1468 SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
1469 RT5677_M_MONO_R_DD2_R_SFT, 1, 1), 1469 RT5677_M_MONO_R_DD2_R_SFT, 1, 1),
1470 SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER, 1470 SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
1471 RT5677_M_DAC4_R_DD2_R_SFT, 1, 1), 1471 RT5677_M_DAC4_R_DD2_R_SFT, 1, 1),
1472 SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER, 1472 SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
1473 RT5677_M_DAC4_L_DD2_R_SFT, 1, 1), 1473 RT5677_M_DAC4_L_DD2_R_SFT, 1, 1),
1474}; 1474};
1475 1475
@@ -2596,6 +2596,21 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
2596 return 0; 2596 return 0;
2597} 2597}
2598 2598
2599static int rt5677_filter_power_event(struct snd_soc_dapm_widget *w,
2600 struct snd_kcontrol *kcontrol, int event)
2601{
2602 switch (event) {
2603 case SND_SOC_DAPM_POST_PMU:
2604 msleep(50);
2605 break;
2606
2607 default:
2608 return 0;
2609 }
2610
2611 return 0;
2612}
2613
2599static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { 2614static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
2600 SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, 2615 SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
2601 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | 2616 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
@@ -3072,19 +3087,26 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
3072 3087
3073 /* DAC Mixer */ 3088 /* DAC Mixer */
3074 SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2, 3089 SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2,
3075 RT5677_PWR_DAC_S1F_BIT, 0, NULL, 0), 3090 RT5677_PWR_DAC_S1F_BIT, 0, rt5677_filter_power_event,
3091 SND_SOC_DAPM_POST_PMU),
3076 SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2, 3092 SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2,
3077 RT5677_PWR_DAC_M2F_L_BIT, 0, NULL, 0), 3093 RT5677_PWR_DAC_M2F_L_BIT, 0, rt5677_filter_power_event,
3094 SND_SOC_DAPM_POST_PMU),
3078 SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2, 3095 SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2,
3079 RT5677_PWR_DAC_M2F_R_BIT, 0, NULL, 0), 3096 RT5677_PWR_DAC_M2F_R_BIT, 0, rt5677_filter_power_event,
3097 SND_SOC_DAPM_POST_PMU),
3080 SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2, 3098 SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2,
3081 RT5677_PWR_DAC_M3F_L_BIT, 0, NULL, 0), 3099 RT5677_PWR_DAC_M3F_L_BIT, 0, rt5677_filter_power_event,
3100 SND_SOC_DAPM_POST_PMU),
3082 SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2, 3101 SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2,
3083 RT5677_PWR_DAC_M3F_R_BIT, 0, NULL, 0), 3102 RT5677_PWR_DAC_M3F_R_BIT, 0, rt5677_filter_power_event,
3103 SND_SOC_DAPM_POST_PMU),
3084 SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2, 3104 SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2,
3085 RT5677_PWR_DAC_M4F_L_BIT, 0, NULL, 0), 3105 RT5677_PWR_DAC_M4F_L_BIT, 0, rt5677_filter_power_event,
3106 SND_SOC_DAPM_POST_PMU),
3086 SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2, 3107 SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2,
3087 RT5677_PWR_DAC_M4F_R_BIT, 0, NULL, 0), 3108 RT5677_PWR_DAC_M4F_R_BIT, 0, rt5677_filter_power_event,
3109 SND_SOC_DAPM_POST_PMU),
3088 3110
3089 SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0, 3111 SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
3090 rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)), 3112 rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)),
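Two things happen in the rt5677 hunks: the DAC mixer switches move to SOC_DAPM_SINGLE_AUTODISABLE, and the DAC filter supply widgets gain rt5677_filter_power_event, which sleeps 50 ms after power-up (SND_SOC_DAPM_POST_PMU) so the filters settle before audio flows through them. When a handler like this also needs work on the way down, the usual shape is the hedged sketch below; the flags passed at widget registration must match the cases handled (SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD here), and the delay is device-specific:

#include <linux/delay.h>
#include <sound/soc-dapm.h>

static int my_filter_event(struct snd_soc_dapm_widget *w,
                           struct snd_kcontrol *kcontrol, int event)
{
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                /* let filter bias settle before the path unmutes */
                msleep(50);
                break;
        case SND_SOC_DAPM_PRE_PMD:
                /* nothing needed on the way down in this sketch */
                break;
        }
        return 0;
}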
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 056375339ea3..5380798883b5 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -229,7 +229,7 @@ SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
229SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, 229SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
230 6, 1, 0), 230 6, 1, 0),
231SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, 231SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
232 7, 1, 0), 232 7, 1, 1),
233 233
234SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", 234SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
235 WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), 235 WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
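The wm8960 one-liner flips the invert flag on "Capture Switch" because bit 7 of LINVOL/RINVOL is a mute bit: 1 in the register means muted, so ALSA's "on" has to map to 0. With invert set, the soc-ops helpers mirror the value through max - raw. In miniature (standalone, assuming that mapping):

#include <assert.h>

/* how a 1-bit inverted register field maps to an ALSA switch value */
static unsigned int ctl_value(unsigned int raw, unsigned int max, int invert)
{
        return invert ? max - raw : raw;
}

int main(void)
{
        /* hardware mute bit set (1) must read back as switch off (0) */
        assert(ctl_value(1, 1, 1) == 0);
        assert(ctl_value(0, 1, 1) == 1);
        return 0;
}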
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 39ebd7bf4f53..a7e79784fc16 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -365,8 +365,8 @@ static const struct reg_default wm8962_reg[] = {
365 { 16924, 0x0059 }, /* R16924 - HDBASS_PG_1 */ 365 { 16924, 0x0059 }, /* R16924 - HDBASS_PG_1 */
366 { 16925, 0x999A }, /* R16925 - HDBASS_PG_0 */ 366 { 16925, 0x999A }, /* R16925 - HDBASS_PG_0 */
367 367
368 { 17048, 0x0083 }, /* R17408 - HPF_C_1 */ 368 { 17408, 0x0083 }, /* R17408 - HPF_C_1 */
369 { 17049, 0x98AD }, /* R17409 - HPF_C_0 */ 369 { 17409, 0x98AD }, /* R17409 - HPF_C_0 */
370 370
371 { 17920, 0x007F }, /* R17920 - ADCL_RETUNE_C1_1 */ 371 { 17920, 0x007F }, /* R17920 - ADCL_RETUNE_C1_1 */
372 { 17921, 0xFFFF }, /* R17921 - ADCL_RETUNE_C1_0 */ 372 { 17921, 0xFFFF }, /* R17921 - ADCL_RETUNE_C1_0 */
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 4495a40a9468..c1c9c2e3525b 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -681,8 +681,8 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
681 } 681 }
682 682
683 mcasp->tdm_slots = slots; 683 mcasp->tdm_slots = slots;
684 mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = rx_mask; 684 mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
685 mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = tx_mask; 685 mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
686 mcasp->slot_width = slot_width; 686 mcasp->slot_width = slot_width;
687 687
688 return davinci_mcasp_set_ch_constraints(mcasp); 688 return davinci_mcasp_set_ch_constraints(mcasp);
@@ -908,6 +908,14 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
908 mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); 908 mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
909 mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, 909 mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
910 FSRMOD(total_slots), FSRMOD(0x1FF)); 910 FSRMOD(total_slots), FSRMOD(0x1FF));
911 /*
912 * If McASP is set to be TX/RX synchronous and the playback is
913 * not running already we need to configure the TX slots in
914 * order to have correct FSX on the bus
915 */
916 if (mcasp_is_synchronous(mcasp) && !mcasp->channels)
917 mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
918 FSXMOD(total_slots), FSXMOD(0x1FF));
911 } 919 }
912 920
913 return 0; 921 return 0;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 19c302b0d763..14dfdee05fd5 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -283,6 +283,8 @@ config SND_SOC_IMX_MC13783
283config SND_SOC_FSL_ASOC_CARD 283config SND_SOC_FSL_ASOC_CARD
284 tristate "Generic ASoC Sound Card with ASRC support" 284 tristate "Generic ASoC Sound Card with ASRC support"
285 depends on OF && I2C 285 depends on OF && I2C
286 # enforce SND_SOC_FSL_ASOC_CARD=m if SND_AC97_CODEC=m:
287 depends on SND_AC97_CODEC || SND_AC97_CODEC=n
286 select SND_SOC_IMX_AUDMUX 288 select SND_SOC_IMX_AUDMUX
287 select SND_SOC_IMX_PCM_DMA 289 select SND_SOC_IMX_PCM_DMA
288 select SND_SOC_FSL_ESAI 290 select SND_SOC_FSL_ESAI
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index a4435f5e3be9..ffd5f9acc849 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -454,7 +454,8 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
454 * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx. 454 * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx.
455 * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx. 455 * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx.
456 */ 456 */
457 regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC, 0); 457 regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC,
458 sai->synchronous[TX] ? FSL_SAI_CR2_SYNC : 0);
458 regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC, 459 regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC,
459 sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0); 460 sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0);
460 461
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 7b778ab85f8b..d430ef5a4f38 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -144,7 +144,7 @@ config SND_SOC_INTEL_SKYLAKE
144 144
145config SND_SOC_INTEL_SKL_RT286_MACH 145config SND_SOC_INTEL_SKL_RT286_MACH
146 tristate "ASoC Audio driver for SKL with RT286 I2S mode" 146 tristate "ASoC Audio driver for SKL with RT286 I2S mode"
147 depends on X86 && ACPI 147 depends on X86 && ACPI && I2C
148 select SND_SOC_INTEL_SST 148 select SND_SOC_INTEL_SST
149 select SND_SOC_INTEL_SKYLAKE 149 select SND_SOC_INTEL_SKYLAKE
150 select SND_SOC_RT286 150 select SND_SOC_RT286
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index a7854c8fc523..ffea427aeca8 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -1240,6 +1240,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
1240 */ 1240 */
1241 ret = snd_soc_tplg_component_load(&platform->component, 1241 ret = snd_soc_tplg_component_load(&platform->component,
1242 &skl_tplg_ops, fw, 0); 1242 &skl_tplg_ops, fw, 0);
1243 release_firmware(fw);
1243 if (ret < 0) { 1244 if (ret < 0) {
1244 dev_err(bus->dev, "tplg component load failed%d\n", ret); 1245 dev_err(bus->dev, "tplg component load failed%d\n", ret);
1245 return -EINVAL; 1246 return -EINVAL;
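The skl-topology fix releases the firmware image as soon as snd_soc_tplg_component_load() has consumed it; previously the buffer was held indefinitely and leaked outright on the error path. The canonical request/consume/release shape, as a kernel-context sketch (my_parse and the firmware name are hypothetical stand-ins):

#include <linux/errno.h>
#include <linux/firmware.h>

static int my_parse(const u8 *data, size_t size)
{
        return (data && size) ? 0 : -EINVAL;    /* hypothetical consumer */
}

static int my_load_topology(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "my-topology.bin", dev);
        if (ret < 0)
                return ret;

        ret = my_parse(fw->data, fw->size);

        /* one release covers success and failure alike */
        release_firmware(fw);
        return ret;
}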
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index a38a3029062c..ac72ff5055bb 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -280,7 +280,7 @@ static int rk_spdif_probe(struct platform_device *pdev)
280 int ret; 280 int ret;
281 281
282 match = of_match_node(rk_spdif_match, np); 282 match = of_match_node(rk_spdif_match, np);
283 if ((int) match->data == RK_SPDIF_RK3288) { 283 if (match->data == (void *)RK_SPDIF_RK3288) {
284 struct regmap *grf; 284 struct regmap *grf;
285 285
286 grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 286 grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
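The rockchip_spdif change fixes a pointer-to-int cast: on 64-bit targets, (int)match->data truncates the pointer and triggers a warning. The idiom for enum-like .data values in an of_device_id table is to store the integer as a pointer and compare (or widen) in pointer space. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

#define RK_SPDIF_RK3288 1               /* illustrative variant id */

struct of_dev_id { const void *data; }; /* stand-in for of_device_id */

int main(void)
{
        struct of_dev_id match = {
                .data = (void *)(uintptr_t)RK_SPDIF_RK3288,
        };

        /* compare as pointers, or widen via uintptr_t; never (int)ptr */
        if (match.data == (void *)(uintptr_t)RK_SPDIF_RK3288)
                printf("RK3288 variant\n");
        return 0;
}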
diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h
index 07f86a21046a..921b4095fb92 100644
--- a/sound/soc/rockchip/rockchip_spdif.h
+++ b/sound/soc/rockchip/rockchip_spdif.h
@@ -28,9 +28,9 @@
28#define SPDIF_CFGR_VDW(x) (x << SPDIF_CFGR_VDW_SHIFT) 28#define SPDIF_CFGR_VDW(x) (x << SPDIF_CFGR_VDW_SHIFT)
29#define SDPIF_CFGR_VDW_MASK (0xf << SPDIF_CFGR_VDW_SHIFT) 29#define SDPIF_CFGR_VDW_MASK (0xf << SPDIF_CFGR_VDW_SHIFT)
30 30
31#define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x00) 31#define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x0)
32#define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x01) 32#define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x1)
33#define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x10) 33#define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x2)
34 34
35/* 35/*
36 * DMACR 36 * DMACR
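The rockchip_spdif.h hunk corrects values that read like binary but were written in hex: 0x10 is 16, not 2, so the 24-bit sample-width setting overflowed the 4-bit VDW field. A quick standalone check makes the slip visible (the shift value is illustrative):

#include <assert.h>

#define VDW_SHIFT 0
#define VDW(x)    ((x) << VDW_SHIFT)
#define VDW_MASK  (0xf << VDW_SHIFT)

int main(void)
{
        /* 0x10 == 16 spills out of a 4-bit field; 0x2 was intended */
        assert((VDW(0x10) & ~VDW_MASK) != 0);
        assert((VDW(0x2)  & ~VDW_MASK) == 0);
        return 0;
}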
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 76da7620904c..edcf4cc2e84f 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -235,7 +235,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
235 RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8), 235 RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8),
236 RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc), 236 RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc),
237 RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0), 237 RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0),
238 RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4), 238 RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1d4),
239 RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40), 239 RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40),
240 RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40), 240 RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40),
241 RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40), 241 RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40),
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 261b50217c48..68b439ed22d7 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -923,6 +923,7 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
923 struct snd_soc_pcm_runtime *rtd) 923 struct snd_soc_pcm_runtime *rtd)
924{ 924{
925 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); 925 struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
926 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
926 struct rsnd_src *src = rsnd_mod_to_src(mod); 927 struct rsnd_src *src = rsnd_mod_to_src(mod);
927 int ret; 928 int ret;
928 929
@@ -937,6 +938,12 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
937 return 0; 938 return 0;
938 939
939 /* 940 /*
941 * SRC In doesn't work if DVC was enabled
942 */
943 if (dvc && !rsnd_io_is_play(io))
944 return 0;
945
946 /*
940 * enable sync convert 947 * enable sync convert
941 */ 948 */
942 ret = rsnd_kctrl_new_s(mod, io, rtd, 949 ret = rsnd_kctrl_new_s(mod, io, rtd,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 24b096066a07..a1305f827a98 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -795,12 +795,12 @@ static void soc_resume_deferred(struct work_struct *work)
795 795
796 dev_dbg(card->dev, "ASoC: resume work completed\n"); 796 dev_dbg(card->dev, "ASoC: resume work completed\n");
797 797
798 /* userspace can access us now we are back as we were before */
799 snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
800
801 /* Recheck all endpoints too, their state is affected by suspend */ 798 /* Recheck all endpoints too, their state is affected by suspend */
802 dapm_mark_endpoints_dirty(card); 799 dapm_mark_endpoints_dirty(card);
803 snd_soc_dapm_sync(&card->dapm); 800 snd_soc_dapm_sync(&card->dapm);
801
802 /* userspace can access us now we are back as we were before */
803 snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
804} 804}
805 805
806/* powers up audio subsystem after a suspend */ 806/* powers up audio subsystem after a suspend */
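The soc-core reorder matters because snd_power_change_state(..., SNDRV_CTL_POWER_D0) is the gate that lets blocked userspace control operations proceed; raising it before dapm_mark_endpoints_dirty() and snd_soc_dapm_sync() let applications race a half-restored DAPM graph. The corrected ordering, reduced to stubs (a standalone sketch, not the ALSA calls themselves):

#include <stdio.h>

static void mark_endpoints_dirty(void) { puts("dapm: mark endpoints dirty"); }
static void dapm_sync(void)            { puts("dapm: sync power state"); }
static void announce_d0(void)          { puts("ctl: D0, unblock userspace"); }

int main(void)
{
        /* resume path: finish internal state, then open the gate */
        mark_endpoints_dirty();
        dapm_sync();
        announce_d0();
        return 0;
}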
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 016eba10b1ec..7d009428934a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2293,6 +2293,12 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
2293 kfree(w); 2293 kfree(w);
2294} 2294}
2295 2295
2296void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm)
2297{
2298 dapm->path_sink_cache.widget = NULL;
2299 dapm->path_source_cache.widget = NULL;
2300}
2301
2296/* free all dapm widgets and resources */ 2302/* free all dapm widgets and resources */
2297static void dapm_free_widgets(struct snd_soc_dapm_context *dapm) 2303static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
2298{ 2304{
@@ -2303,6 +2309,7 @@ static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
2303 continue; 2309 continue;
2304 snd_soc_dapm_free_widget(w); 2310 snd_soc_dapm_free_widget(w);
2305 } 2311 }
2312 snd_soc_dapm_reset_cache(dapm);
2306} 2313}
2307 2314
2308static struct snd_soc_dapm_widget *dapm_find_widget( 2315static struct snd_soc_dapm_widget *dapm_find_widget(
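snd_soc_dapm_reset_cache() exists because the DAPM context memoizes the last matched sink and source widgets; once dapm_free_widgets() (or the topology teardown further down) frees them, those cache slots dangle. Dropping the cache alongside the objects is the standard cure. The pattern in miniature:

#include <stdlib.h>

struct widget { int id; };

struct ctx {
        struct widget *cached;  /* memoized last-hit widget */
};

static void free_widgets(struct ctx *c, struct widget *w)
{
        free(w);
        c->cached = NULL;       /* the fix: cache dies with the objects */
}

int main(void)
{
        struct ctx c = { 0 };
        struct widget *w = malloc(sizeof(*w));

        c.cached = w;
        free_widgets(&c, w);
        /* later lookups must miss instead of touching freed memory */
        return c.cached ? 1 : 0;
}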
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index ecd38e52285a..2f67ba6d7a8f 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -404,7 +404,7 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
404/** 404/**
405 * snd_soc_put_volsw_sx - double mixer set callback 405 * snd_soc_put_volsw_sx - double mixer set callback
406 * @kcontrol: mixer control 406 * @kcontrol: mixer control
407 * @uinfo: control element information 407 * @ucontrol: control element information
408 * 408 *
409 * Callback to set the value of a double mixer control that spans 2 registers. 409 * Callback to set the value of a double mixer control that spans 2 registers.
410 * 410 *
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 8d7ec80af51b..6963ba20991c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -531,7 +531,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
531 /* TLV bytes controls need standard kcontrol info handler, 531 /* TLV bytes controls need standard kcontrol info handler,
532 * TLV callback and extended put/get handlers. 532 * TLV callback and extended put/get handlers.
533 */ 533 */
534 k->info = snd_soc_bytes_info; 534 k->info = snd_soc_bytes_info_ext;
535 k->tlv.c = snd_soc_bytes_tlv_callback; 535 k->tlv.c = snd_soc_bytes_tlv_callback;
536 536
537 ext_ops = tplg->bytes_ext_ops; 537 ext_ops = tplg->bytes_ext_ops;
@@ -1805,6 +1805,7 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
1805 snd_soc_tplg_widget_remove(w); 1805 snd_soc_tplg_widget_remove(w);
1806 snd_soc_dapm_free_widget(w); 1806 snd_soc_dapm_free_widget(w);
1807 } 1807 }
1808 snd_soc_dapm_reset_cache(dapm);
1808} 1809}
1809EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all); 1810EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
1810 1811
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 843f037a317d..5c2bc53f0a9b 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -669,6 +669,7 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
669{ 669{
670 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); 670 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
671 struct uniperif *player = priv->dai_data.uni; 671 struct uniperif *player = priv->dai_data.uni;
672 player->substream = substream;
672 673
673 player->clk_adj = 0; 674 player->clk_adj = 0;
674 675
@@ -950,6 +951,8 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
950 if (player->state != UNIPERIF_STATE_STOPPED) 951 if (player->state != UNIPERIF_STATE_STOPPED)
951 /* Stop the player */ 952 /* Stop the player */
952 uni_player_stop(player); 953 uni_player_stop(player);
954
955 player->substream = NULL;
953} 956}
954 957
955static int uni_player_parse_dt_clk_glue(struct platform_device *pdev, 958static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
@@ -989,7 +992,7 @@ static int uni_player_parse_dt(struct platform_device *pdev,
989 if (!info) 992 if (!info)
990 return -ENOMEM; 993 return -ENOMEM;
991 994
992 if (of_property_read_u32(pnode, "version", &player->ver) || 995 if (of_property_read_u32(pnode, "st,version", &player->ver) ||
993 player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { 996 player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
994 dev_err(dev, "Unknown uniperipheral version "); 997 dev_err(dev, "Unknown uniperipheral version ");
995 return -EINVAL; 998 return -EINVAL;
@@ -998,13 +1001,13 @@ static int uni_player_parse_dt(struct platform_device *pdev,
998 if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) 1001 if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
999 info->underflow_enabled = 1; 1002 info->underflow_enabled = 1;
1000 1003
1001 if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) { 1004 if (of_property_read_u32(pnode, "st,uniperiph-id", &info->id)) {
1002 dev_err(dev, "uniperipheral id not defined"); 1005 dev_err(dev, "uniperipheral id not defined");
1003 return -EINVAL; 1006 return -EINVAL;
1004 } 1007 }
1005 1008
1006 /* Read the device mode property */ 1009 /* Read the device mode property */
1007 if (of_property_read_string(pnode, "mode", &mode)) { 1010 if (of_property_read_string(pnode, "st,mode", &mode)) {
1008 dev_err(dev, "uniperipheral mode not defined"); 1011 dev_err(dev, "uniperipheral mode not defined");
1009 return -EINVAL; 1012 return -EINVAL;
1010 } 1013 }
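Besides tracking player->substream across startup/shutdown, the sti hunks move the DT properties under the "st," vendor prefix, which the bindings convention requires for vendor-specific properties. The patch switches names outright; a driver that had already shipped with the unprefixed names might instead prefer the prefixed property and fall back, as in this kernel-context sketch (the fallback is my assumption, not something this patch does):

#include <linux/of.h>

static int read_uniperiph_id(struct device_node *np, u32 *id)
{
        /* prefer the vendor-prefixed property */
        if (!of_property_read_u32(np, "st,uniperiph-id", id))
                return 0;
        /* fall back to the legacy unprefixed name */
        return of_property_read_u32(np, "uniperiph-id", id);
}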
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index f791239a3087..8a0eb2050169 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -316,7 +316,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
316 if (!info) 316 if (!info)
317 return -ENOMEM; 317 return -ENOMEM;
318 318
319 if (of_property_read_u32(node, "version", &reader->ver) || 319 if (of_property_read_u32(node, "st,version", &reader->ver) ||
320 reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { 320 reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
321 dev_err(&pdev->dev, "Unknown uniperipheral version "); 321 dev_err(&pdev->dev, "Unknown uniperipheral version ");
322 return -EINVAL; 322 return -EINVAL;
@@ -346,7 +346,6 @@ int uni_reader_init(struct platform_device *pdev,
346 reader->hw = &uni_reader_pcm_hw; 346 reader->hw = &uni_reader_pcm_hw;
347 reader->dai_ops = &uni_reader_dai_ops; 347 reader->dai_ops = &uni_reader_dai_ops;
348 348
349 dev_err(reader->dev, "%s: enter\n", __func__);
350 ret = uni_reader_parse_dt(pdev, reader); 349 ret = uni_reader_parse_dt(pdev, reader);
351 if (ret < 0) { 350 if (ret < 0) {
352 dev_err(reader->dev, "Failed to parse DeviceTree"); 351 dev_err(reader->dev, "Failed to parse DeviceTree");
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index bcbf4da168b6..1bb896d78d09 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -2,6 +2,7 @@
2 * Copyright 2014 Emilio López <emilio@elopez.com.ar> 2 * Copyright 2014 Emilio López <emilio@elopez.com.ar>
3 * Copyright 2014 Jon Smirl <jonsmirl@gmail.com> 3 * Copyright 2014 Jon Smirl <jonsmirl@gmail.com>
4 * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com> 4 * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
5 * Copyright 2015 Adam Sampson <ats@offog.org>
5 * 6 *
6 * Based on the Allwinner SDK driver, released under the GPL. 7 * Based on the Allwinner SDK driver, released under the GPL.
7 * 8 *
@@ -404,7 +405,7 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute =
404static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1); 405static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1);
405 406
406static const struct snd_kcontrol_new sun4i_codec_widgets[] = { 407static const struct snd_kcontrol_new sun4i_codec_widgets[] = {
407 SOC_SINGLE_TLV("PA Volume", SUN4I_CODEC_DAC_ACTL, 408 SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
408 SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0, 409 SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
409 sun4i_codec_pa_volume_scale), 410 sun4i_codec_pa_volume_scale),
410}; 411};
@@ -452,12 +453,12 @@ static const struct snd_soc_dapm_widget sun4i_codec_dapm_widgets[] = {
452 SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL, 453 SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL,
453 SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0), 454 SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0),
454 455
455 /* Pre-Amplifier */ 456 /* Power Amplifier */
456 SND_SOC_DAPM_MIXER("Pre-Amplifier", SUN4I_CODEC_ADC_ACTL, 457 SND_SOC_DAPM_MIXER("Power Amplifier", SUN4I_CODEC_ADC_ACTL,
457 SUN4I_CODEC_ADC_ACTL_PA_EN, 0, 458 SUN4I_CODEC_ADC_ACTL_PA_EN, 0,
458 sun4i_codec_pa_mixer_controls, 459 sun4i_codec_pa_mixer_controls,
459 ARRAY_SIZE(sun4i_codec_pa_mixer_controls)), 460 ARRAY_SIZE(sun4i_codec_pa_mixer_controls)),
460 SND_SOC_DAPM_SWITCH("Pre-Amplifier Mute", SND_SOC_NOPM, 0, 0, 461 SND_SOC_DAPM_SWITCH("Power Amplifier Mute", SND_SOC_NOPM, 0, 0,
461 &sun4i_codec_pa_mute), 462 &sun4i_codec_pa_mute),
462 463
463 SND_SOC_DAPM_OUTPUT("HP Right"), 464 SND_SOC_DAPM_OUTPUT("HP Right"),
@@ -480,16 +481,16 @@ static const struct snd_soc_dapm_route sun4i_codec_dapm_routes[] = {
480 { "Left Mixer", NULL, "Mixer Enable" }, 481 { "Left Mixer", NULL, "Mixer Enable" },
481 { "Left Mixer", "Left DAC Playback Switch", "Left DAC" }, 482 { "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
482 483
483 /* Pre-Amplifier Mixer Routes */ 484 /* Power Amplifier Routes */
484 { "Pre-Amplifier", "Mixer Playback Switch", "Left Mixer" }, 485 { "Power Amplifier", "Mixer Playback Switch", "Left Mixer" },
485 { "Pre-Amplifier", "Mixer Playback Switch", "Right Mixer" }, 486 { "Power Amplifier", "Mixer Playback Switch", "Right Mixer" },
486 { "Pre-Amplifier", "DAC Playback Switch", "Left DAC" }, 487 { "Power Amplifier", "DAC Playback Switch", "Left DAC" },
487 { "Pre-Amplifier", "DAC Playback Switch", "Right DAC" }, 488 { "Power Amplifier", "DAC Playback Switch", "Right DAC" },
488 489
489 /* PA -> HP path */ 490 /* Headphone Output Routes */
490 { "Pre-Amplifier Mute", "Switch", "Pre-Amplifier" }, 491 { "Power Amplifier Mute", "Switch", "Power Amplifier" },
491 { "HP Right", NULL, "Pre-Amplifier Mute" }, 492 { "HP Right", NULL, "Power Amplifier Mute" },
492 { "HP Left", NULL, "Pre-Amplifier Mute" }, 493 { "HP Left", NULL, "Power Amplifier Mute" },
493}; 494};
494 495
495static struct snd_soc_codec_driver sun4i_codec_codec = { 496static struct snd_soc_codec_driver sun4i_codec_codec = {
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 7661616f3636..5b4c58c3e2c5 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint {
174 u8 running_status_length; 174 u8 running_status_length;
175 } ports[0x10]; 175 } ports[0x10];
176 u8 seen_f5; 176 u8 seen_f5;
177 bool in_sysex;
178 u8 last_cin;
177 u8 error_resubmit; 179 u8 error_resubmit;
178 int current_port; 180 int current_port;
179}; 181};
@@ -468,6 +470,39 @@ static void snd_usbmidi_maudio_broken_running_status_input(
468} 470}
469 471
470/* 472/*
473 * QinHeng CH345 is buggy: every second packet inside a SysEx has not CIN 4
474 * but the previously seen CIN, but still with three data bytes.
475 */
476static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
477 uint8_t *buffer, int buffer_length)
478{
479 unsigned int i, cin, length;
480
481 for (i = 0; i + 3 < buffer_length; i += 4) {
482 if (buffer[i] == 0 && i > 0)
483 break;
484 cin = buffer[i] & 0x0f;
485 if (ep->in_sysex &&
486 cin == ep->last_cin &&
487 (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
488 cin = 0x4;
489#if 0
490 if (buffer[i + 1] == 0x90) {
491 /*
492 * Either a corrupted running status or a real note-on
493 * message; impossible to detect reliably.
494 */
495 }
496#endif
497 length = snd_usbmidi_cin_length[cin];
498 snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
499 ep->in_sysex = cin == 0x4;
500 if (!ep->in_sysex)
501 ep->last_cin = cin;
502 }
503}
504
505/*
471 * CME protocol: like the standard protocol, but SysEx commands are sent as a 506 * CME protocol: like the standard protocol, but SysEx commands are sent as a
472 * single USB packet preceded by a 0x0F byte. 507 * single USB packet preceded by a 0x0F byte.
473 */ 508 */
@@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
660 .output_packet = snd_usbmidi_output_standard_packet, 695 .output_packet = snd_usbmidi_output_standard_packet,
661}; 696};
662 697
698static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
699 .input = ch345_broken_sysex_input,
700 .output = snd_usbmidi_standard_output,
701 .output_packet = snd_usbmidi_output_standard_packet,
702};
703
663/* 704/*
664 * AKAI MPD16 protocol: 705 * AKAI MPD16 protocol:
665 * 706 *
@@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
1341 * Various chips declare a packet size larger than 4 bytes, but 1382 * Various chips declare a packet size larger than 4 bytes, but
1342 * do not actually work with larger packets: 1383 * do not actually work with larger packets:
1343 */ 1384 */
1385 case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
1344 case USB_ID(0x0a92, 0x1020): /* ESI M4U */ 1386 case USB_ID(0x0a92, 0x1020): /* ESI M4U */
1345 case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */ 1387 case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
1346 case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */ 1388 case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
@@ -2378,6 +2420,10 @@ int snd_usbmidi_create(struct snd_card *card,
2378 2420
2379 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); 2421 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2380 break; 2422 break;
2423 case QUIRK_MIDI_CH345:
2424 umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
2425 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2426 break;
2381 default: 2427 default:
2382 dev_err(&umidi->dev->dev, "invalid quirk type %d\n", 2428 dev_err(&umidi->dev->dev, "invalid quirk type %d\n",
2383 quirk->type); 2429 quirk->type);
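The CH345 workaround leans on a fixed property of USB MIDI 1.0: the low nibble of each packet header (the Code Index Number) fully determines how many of the following three bytes carry MIDI data. ch345_broken_sysex_input() rewrites the bogus repeated CIN back to 0x4 (SysEx continue) when the payload plainly continues a SysEx, then hands the right number of bytes up. The standard CIN-to-length table, standalone:

#include <assert.h>

/* USB Device Class Definition for MIDI Devices 1.0, table 4-1:
 * bytes of valid MIDI data per Code Index Number */
static const unsigned char cin_length[16] = {
        0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
};

int main(void)
{
        assert(cin_length[0x4] == 3);   /* SysEx start/continue */
        assert(cin_length[0x5] == 1);   /* SysEx end, 1 byte */
        assert(cin_length[0x6] == 2);   /* SysEx end, 2 bytes */
        return 0;
}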
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 1a1e2e4df35e..c60a776e815d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2829,6 +2829,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2829 .idProduct = 0x1020, 2829 .idProduct = 0x1020,
2830}, 2830},
2831 2831
2832/* QinHeng devices */
2833{
2834 USB_DEVICE(0x1a86, 0x752d),
2835 .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
2836 .vendor_name = "QinHeng",
2837 .product_name = "CH345",
2838 .ifnum = 1,
2839 .type = QUIRK_MIDI_CH345
2840 }
2841},
2842
2832/* KeithMcMillen Stringport */ 2843/* KeithMcMillen Stringport */
2833{ 2844{
2834 USB_DEVICE(0x1f38, 0x0001), 2845 USB_DEVICE(0x1f38, 0x0001),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5ca80e7d30cd..7016ad898187 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -538,6 +538,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
538 [QUIRK_MIDI_CME] = create_any_midi_quirk, 538 [QUIRK_MIDI_CME] = create_any_midi_quirk,
539 [QUIRK_MIDI_AKAI] = create_any_midi_quirk, 539 [QUIRK_MIDI_AKAI] = create_any_midi_quirk,
540 [QUIRK_MIDI_FTDI] = create_any_midi_quirk, 540 [QUIRK_MIDI_FTDI] = create_any_midi_quirk,
541 [QUIRK_MIDI_CH345] = create_any_midi_quirk,
541 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, 542 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
542 [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, 543 [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
543 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, 544 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 15a12715bd05..b665d85555cb 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -95,6 +95,7 @@ enum quirk_type {
95 QUIRK_MIDI_AKAI, 95 QUIRK_MIDI_AKAI,
96 QUIRK_MIDI_US122L, 96 QUIRK_MIDI_US122L,
97 QUIRK_MIDI_FTDI, 97 QUIRK_MIDI_FTDI,
98 QUIRK_MIDI_CH345,
98 QUIRK_AUDIO_STANDARD_INTERFACE, 99 QUIRK_AUDIO_STANDARD_INTERFACE,
99 QUIRK_AUDIO_FIXED_ENDPOINT, 100 QUIRK_AUDIO_FIXED_ENDPOINT,
100 QUIRK_AUDIO_EDIROL_UAXX, 101 QUIRK_AUDIO_EDIROL_UAXX,
diff --git a/tools/Makefile b/tools/Makefile
index d6f307dfb1a3..7dc820a8c1f1 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -32,6 +32,10 @@ help:
32 @echo ' from the kernel command line to build and install one of' 32 @echo ' from the kernel command line to build and install one of'
33 @echo ' the tools above' 33 @echo ' the tools above'
34 @echo '' 34 @echo ''
35 @echo ' $$ make tools/all'
36 @echo ''
37 @echo ' builds all tools.'
38 @echo ''
35 @echo ' $$ make tools/install' 39 @echo ' $$ make tools/install'
36 @echo '' 40 @echo ''
37 @echo ' installs all tools.' 41 @echo ' installs all tools.'
@@ -77,6 +81,11 @@ tmon: FORCE
77freefall: FORCE 81freefall: FORCE
78 $(call descend,laptop/$@) 82 $(call descend,laptop/$@)
79 83
84all: acpi cgroup cpupower hv firewire lguest \
85 perf selftests turbostat usb \
86 virtio vm net x86_energy_perf_policy \
87 tmon freefall
88
80acpi_install: 89acpi_install:
81 $(call descend,power/$(@:_install=),install) 90 $(call descend,power/$(@:_install=),install)
82 91
@@ -101,7 +110,7 @@ freefall_install:
101install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ 110install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
102 perf_install selftests_install turbostat_install usb_install \ 111 perf_install selftests_install turbostat_install usb_install \
103 virtio_install vm_install net_install x86_energy_perf_policy_install \ 112 virtio_install vm_install net_install x86_energy_perf_policy_install \
104 tmon freefall_install 113 tmon_install freefall_install
105 114
106acpi_clean: 115acpi_clean:
107 $(call descend,power/acpi,clean) 116 $(call descend,power/acpi,clean)
diff --git a/tools/net/Makefile b/tools/net/Makefile
index ee577ea03ba5..ddf888010652 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -4,6 +4,9 @@ CC = gcc
4LEX = flex 4LEX = flex
5YACC = bison 5YACC = bison
6 6
7CFLAGS += -Wall -O2
8CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
9
7%.yacc.c: %.y 10%.yacc.c: %.y
8 $(YACC) -o $@ -d $< 11 $(YACC) -o $@ -d $<
9 12
@@ -12,15 +15,13 @@ YACC = bison
12 15
13all : bpf_jit_disasm bpf_dbg bpf_asm 16all : bpf_jit_disasm bpf_dbg bpf_asm
14 17
15bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' 18bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
16bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl 19bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
17bpf_jit_disasm : bpf_jit_disasm.o 20bpf_jit_disasm : bpf_jit_disasm.o
18 21
19bpf_dbg : CFLAGS = -Wall -O2
20bpf_dbg : LDLIBS = -lreadline 22bpf_dbg : LDLIBS = -lreadline
21bpf_dbg : bpf_dbg.o 23bpf_dbg : bpf_dbg.o
22 24
23bpf_asm : CFLAGS = -Wall -O2 -I.
24bpf_asm : LDLIBS = 25bpf_asm : LDLIBS =
25bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o 26bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
26bpf_exp.lex.o : bpf_exp.yacc.c 27bpf_exp.lex.o : bpf_exp.yacc.c
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 0a945d2e8ca5..99d127fe9c35 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -675,6 +675,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
675 .fork = perf_event__repipe, 675 .fork = perf_event__repipe,
676 .exit = perf_event__repipe, 676 .exit = perf_event__repipe,
677 .lost = perf_event__repipe, 677 .lost = perf_event__repipe,
678 .lost_samples = perf_event__repipe,
678 .aux = perf_event__repipe, 679 .aux = perf_event__repipe,
679 .itrace_start = perf_event__repipe, 680 .itrace_start = perf_event__repipe,
680 .context_switch = perf_event__repipe, 681 .context_switch = perf_event__repipe,
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2853ad2bd435..f256fac1e722 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -44,7 +44,7 @@
44struct report { 44struct report {
45 struct perf_tool tool; 45 struct perf_tool tool;
46 struct perf_session *session; 46 struct perf_session *session;
47 bool force, use_tui, use_gtk, use_stdio; 47 bool use_tui, use_gtk, use_stdio;
48 bool hide_unresolved; 48 bool hide_unresolved;
49 bool dont_use_callchains; 49 bool dont_use_callchains;
50 bool show_full_info; 50 bool show_full_info;
@@ -678,7 +678,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
678 "file", "vmlinux pathname"), 678 "file", "vmlinux pathname"),
679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
680 "file", "kallsyms pathname"), 680 "file", "kallsyms pathname"),
681 OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), 681 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
683 "load module symbols - WARNING: use only with -k and LIVE kernel"), 683 "load module symbols - WARNING: use only with -k and LIVE kernel"),
684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
@@ -832,7 +832,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
832 } 832 }
833 833
834 file.path = input_name; 834 file.path = input_name;
835 file.force = report.force; 835 file.force = symbol_conf.force;
836 836
837repeat: 837repeat:
838 session = perf_session__new(&file, false, &report.tool); 838 session = perf_session__new(&file, false, &report.tool);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e5afb8936040..fa9eb92c9e24 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1430,7 +1430,6 @@ close_file_and_continue:
1430 1430
1431struct popup_action { 1431struct popup_action {
1432 struct thread *thread; 1432 struct thread *thread;
1433 struct dso *dso;
1434 struct map_symbol ms; 1433 struct map_symbol ms;
1435 int socket; 1434 int socket;
1436 1435
@@ -1565,7 +1564,6 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act,
1565 return 0; 1564 return 0;
1566 1565
1567 act->ms.map = map; 1566 act->ms.map = map;
1568 act->dso = map->dso;
1569 act->fn = do_zoom_dso; 1567 act->fn = do_zoom_dso;
1570 return 1; 1568 return 1;
1571} 1569}
@@ -1827,7 +1825,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1827 1825
1828 while (1) { 1826 while (1) {
1829 struct thread *thread = NULL; 1827 struct thread *thread = NULL;
1830 struct dso *dso = NULL;
1831 struct map *map = NULL; 1828 struct map *map = NULL;
1832 int choice = 0; 1829 int choice = 0;
1833 int socked_id = -1; 1830 int socked_id = -1;
@@ -1839,8 +1836,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1839 if (browser->he_selection != NULL) { 1836 if (browser->he_selection != NULL) {
1840 thread = hist_browser__selected_thread(browser); 1837 thread = hist_browser__selected_thread(browser);
1841 map = browser->selection->map; 1838 map = browser->selection->map;
1842 if (map)
1843 dso = map->dso;
1844 socked_id = browser->he_selection->socket; 1839 socked_id = browser->he_selection->socket;
1845 } 1840 }
1846 switch (key) { 1841 switch (key) {
@@ -1874,7 +1869,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1874 hist_browser__dump(browser); 1869 hist_browser__dump(browser);
1875 continue; 1870 continue;
1876 case 'd': 1871 case 'd':
1877 actions->dso = dso; 1872 actions->ms.map = map;
1878 do_zoom_dso(browser, actions); 1873 do_zoom_dso(browser, actions);
1879 continue; 1874 continue;
1880 case 'V': 1875 case 'V':
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index d909459fb54c..217b5a60e2ab 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -76,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = {
76 .exit = perf_event__exit_del_thread, 76 .exit = perf_event__exit_del_thread,
77 .attr = perf_event__process_attr, 77 .attr = perf_event__process_attr,
78 .build_id = perf_event__process_build_id, 78 .build_id = perf_event__process_build_id,
79 .ordered_events = true,
79}; 80};
80 81
81int build_id__sprintf(const u8 *build_id, int len, char *bf) 82int build_id__sprintf(const u8 *build_id, int len, char *bf)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 7c0c08386a1d..425df5c86c9c 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -933,6 +933,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root,
933 /* Add new node and rebalance tree */ 933 /* Add new node and rebalance tree */
934 rb_link_node(&dso->rb_node, parent, p); 934 rb_link_node(&dso->rb_node, parent, p);
935 rb_insert_color(&dso->rb_node, root); 935 rb_insert_color(&dso->rb_node, root);
936 dso->root = root;
936 } 937 }
937 return NULL; 938 return NULL;
938} 939}
@@ -945,15 +946,30 @@ static inline struct dso *__dso__find_by_longname(struct rb_root *root,
945 946
946void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) 947void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
947{ 948{
949 struct rb_root *root = dso->root;
950
948 if (name == NULL) 951 if (name == NULL)
949 return; 952 return;
950 953
951 if (dso->long_name_allocated) 954 if (dso->long_name_allocated)
952 free((char *)dso->long_name); 955 free((char *)dso->long_name);
953 956
957 if (root) {
958 rb_erase(&dso->rb_node, root);
959 /*
960 * __dso__findlink_by_longname() isn't guaranteed to add it
961 * back, so a clean removal is required here.
962 */
963 RB_CLEAR_NODE(&dso->rb_node);
964 dso->root = NULL;
965 }
966
954 dso->long_name = name; 967 dso->long_name = name;
955 dso->long_name_len = strlen(name); 968 dso->long_name_len = strlen(name);
956 dso->long_name_allocated = name_allocated; 969 dso->long_name_allocated = name_allocated;
970
971 if (root)
972 __dso__findlink_by_longname(root, dso, NULL);
957} 973}
958 974
959void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) 975void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
@@ -1046,6 +1062,7 @@ struct dso *dso__new(const char *name)
1046 dso->kernel = DSO_TYPE_USER; 1062 dso->kernel = DSO_TYPE_USER;
1047 dso->needs_swap = DSO_SWAP__UNSET; 1063 dso->needs_swap = DSO_SWAP__UNSET;
1048 RB_CLEAR_NODE(&dso->rb_node); 1064 RB_CLEAR_NODE(&dso->rb_node);
1065 dso->root = NULL;
1049 INIT_LIST_HEAD(&dso->node); 1066 INIT_LIST_HEAD(&dso->node);
1050 INIT_LIST_HEAD(&dso->data.open_entry); 1067 INIT_LIST_HEAD(&dso->data.open_entry);
1051 pthread_mutex_init(&dso->lock, NULL); 1068 pthread_mutex_init(&dso->lock, NULL);
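The dso.c change records which rbtree a dso hangs in (dso->root) so that dso__set_long_name() can unlink it, rename it, and relink it. That dance is mandatory: an rbtree is ordered by its key, and mutating the key of a node that is still linked silently breaks the ordering invariant and every later lookup. The discipline with the kernel rbtree API, as a hedged sketch (item and item_insert are illustrative; item_insert is assumed to link the node sorted by key):

#include <linux/rbtree.h>

struct item {
        struct rb_node rb;
        const char *key;
};

/* hypothetical: links it into root, sorted by strcmp() on key */
void item_insert(struct rb_root *root, struct item *it);

static void item_rename(struct rb_root *root, struct item *it,
                        const char *new_key)
{
        rb_erase(&it->rb, root);        /* unlink before the key changes */
        RB_CLEAR_NODE(&it->rb);
        it->key = new_key;
        item_insert(root, it);          /* relink at the new position */
}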
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index fc8db9c764ac..45ec4d0a50ed 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -135,6 +135,7 @@ struct dso {
135 pthread_mutex_t lock; 135 pthread_mutex_t lock;
136 struct list_head node; 136 struct list_head node;
137 struct rb_node rb_node; /* rbtree node sorted by long name */ 137 struct rb_node rb_node; /* rbtree node sorted by long name */
138 struct rb_root *root; /* root of rbtree that rb_node is in */
138 struct rb_root symbols[MAP__NR_TYPES]; 139 struct rb_root symbols[MAP__NR_TYPES];
139 struct rb_root symbol_names[MAP__NR_TYPES]; 140 struct rb_root symbol_names[MAP__NR_TYPES];
140 struct { 141 struct {
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 5ef90be2a249..8b303ff20289 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -91,6 +91,7 @@ static void dsos__purge(struct dsos *dsos)
91 91
92 list_for_each_entry_safe(pos, n, &dsos->head, node) { 92 list_for_each_entry_safe(pos, n, &dsos->head, node) {
93 RB_CLEAR_NODE(&pos->rb_node); 93 RB_CLEAR_NODE(&pos->rb_node);
94 pos->root = NULL;
94 list_del_init(&pos->node); 95 list_del_init(&pos->node);
95 dso__put(pos); 96 dso__put(pos);
96 } 97 }
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index bd8f03de5e40..05012bb178d7 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1183,7 +1183,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1183 container_of(pf, struct trace_event_finder, pf); 1183 container_of(pf, struct trace_event_finder, pf);
1184 struct perf_probe_point *pp = &pf->pev->point; 1184 struct perf_probe_point *pp = &pf->pev->point;
1185 struct probe_trace_event *tev; 1185 struct probe_trace_event *tev;
1186 struct perf_probe_arg *args; 1186 struct perf_probe_arg *args = NULL;
1187 int ret, i; 1187 int ret, i;
1188 1188
1189 /* Check number of tevs */ 1189 /* Check number of tevs */
@@ -1198,19 +1198,23 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1198 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, 1198 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
1199 pp->retprobe, pp->function, &tev->point); 1199 pp->retprobe, pp->function, &tev->point);
1200 if (ret < 0) 1200 if (ret < 0)
1201 return ret; 1201 goto end;
1202 1202
1203 tev->point.realname = strdup(dwarf_diename(sc_die)); 1203 tev->point.realname = strdup(dwarf_diename(sc_die));
1204 if (!tev->point.realname) 1204 if (!tev->point.realname) {
1205 return -ENOMEM; 1205 ret = -ENOMEM;
1206 goto end;
1207 }
1206 1208
1207 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, 1209 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
1208 tev->point.offset); 1210 tev->point.offset);
1209 1211
1210 /* Expand special probe argument if exist */ 1212 /* Expand special probe argument if exist */
1211 args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); 1213 args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
1212 if (args == NULL) 1214 if (args == NULL) {
1213 return -ENOMEM; 1215 ret = -ENOMEM;
1216 goto end;
1217 }
1214 1218
1215 ret = expand_probe_args(sc_die, pf, args); 1219 ret = expand_probe_args(sc_die, pf, args);
1216 if (ret < 0) 1220 if (ret < 0)
@@ -1234,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1234 } 1238 }
1235 1239
1236end: 1240end:
1241 if (ret) {
1242 clear_probe_trace_event(tev);
1243 tf->ntevs--;
1244 }
1237 free(args); 1245 free(args);
1238 return ret; 1246 return ret;
1239} 1247}
@@ -1246,7 +1254,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
1246 struct trace_event_finder tf = { 1254 struct trace_event_finder tf = {
1247 .pf = {.pev = pev, .callback = add_probe_trace_event}, 1255 .pf = {.pev = pev, .callback = add_probe_trace_event},
1248 .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; 1256 .max_tevs = probe_conf.max_probes, .mod = dbg->mod};
1249 int ret; 1257 int ret, i;
1250 1258
1251 /* Allocate result tevs array */ 1259 /* Allocate result tevs array */
1252 *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); 1260 *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
@@ -1258,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
1258 1266
1259 ret = debuginfo__find_probes(dbg, &tf.pf); 1267 ret = debuginfo__find_probes(dbg, &tf.pf);
1260 if (ret < 0) { 1268 if (ret < 0) {
1269 for (i = 0; i < tf.ntevs; i++)
1270 clear_probe_trace_event(&tf.tevs[i]);
1261 zfree(tevs); 1271 zfree(tevs);
1262 return ret; 1272 return ret;
1263 } 1273 }
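The probe-finder fix turns early returns into goto end so that one exit path can tear down a partially built trace event and roll back the tf->ntevs count on any failure. The single-exit cleanup shape it adopts, standalone (the resources are illustrative):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct event { char *name; };

static int build_event(struct event *ev)
{
        char *args = NULL;
        int ret = 0;

        ev->name = strdup("probe");
        if (!ev->name) {
                ret = -ENOMEM;
                goto end;
        }

        args = calloc(16, sizeof(*args));
        if (!args) {
                ret = -ENOMEM;
                goto end;
        }
        /* ... use args to finish building ev ... */
end:
        if (ret) {                      /* undo the half-built event */
                free(ev->name);
                ev->name = NULL;
        }
        free(args);                     /* free(NULL) is a no-op */
        return ret;
}

int main(void)
{
        struct event ev = { 0 };

        return build_event(&ev) ? 1 : 0;
}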
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b4cc7662677e..cd08027a6d2c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -654,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
654 struct map_groups *kmaps = map__kmaps(map); 654 struct map_groups *kmaps = map__kmaps(map);
655 struct map *curr_map; 655 struct map *curr_map;
656 struct symbol *pos; 656 struct symbol *pos;
657 int count = 0, moved = 0; 657 int count = 0;
658 struct rb_root old_root = dso->symbols[map->type];
658 struct rb_root *root = &dso->symbols[map->type]; 659 struct rb_root *root = &dso->symbols[map->type];
659 struct rb_node *next = rb_first(root); 660 struct rb_node *next = rb_first(root);
660 661
661 if (!kmaps) 662 if (!kmaps)
662 return -1; 663 return -1;
663 664
665 *root = RB_ROOT;
666
664 while (next) { 667 while (next) {
665 char *module; 668 char *module;
666 669
667 pos = rb_entry(next, struct symbol, rb_node); 670 pos = rb_entry(next, struct symbol, rb_node);
668 next = rb_next(&pos->rb_node); 671 next = rb_next(&pos->rb_node);
669 672
673 rb_erase_init(&pos->rb_node, &old_root);
674
670 module = strchr(pos->name, '\t'); 675 module = strchr(pos->name, '\t');
671 if (module) 676 if (module)
672 *module = '\0'; 677 *module = '\0';
@@ -674,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
 		curr_map = map_groups__find(kmaps, map->type, pos->start);
 
 		if (!curr_map || (filter && filter(curr_map, pos))) {
-			rb_erase_init(&pos->rb_node, root);
 			symbol__delete(pos);
-		} else {
-			pos->start -= curr_map->start - curr_map->pgoff;
-			if (pos->end)
-				pos->end -= curr_map->start - curr_map->pgoff;
-			if (curr_map->dso != map->dso) {
-				rb_erase_init(&pos->rb_node, root);
-				symbols__insert(
-					&curr_map->dso->symbols[curr_map->type],
-					pos);
-				++moved;
-			} else {
-				++count;
-			}
+			continue;
 		}
+
+		pos->start -= curr_map->start - curr_map->pgoff;
+		if (pos->end)
+			pos->end -= curr_map->start - curr_map->pgoff;
+		symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+		++count;
 	}
 
 	/* Symbols have been adjusted */
 	dso->adjust_symbols = 1;
 
-	return count + moved;
+	return count;
 }
 
 /*
@@ -1438,9 +1436,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
 	if (lstat(dso->name, &st) < 0)
 		goto out;
 
-	if (st.st_uid && (st.st_uid != geteuid())) {
+	if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
 		pr_warning("File %s not owned by current user or root, "
-			"ignoring it.\n", dso->name);
+			"ignoring it (use -f to override).\n", dso->name);
 		goto out;
 	}
 
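
The dso__split_kallsyms_for_kcore() rework above sidesteps mutating an rbtree while walking it: the function snapshots the root, resets the live root to empty, detaches each node from the snapshot, and only then deletes or re-inserts it. A userspace sketch of the same drain-and-reinsert pattern on a plain linked list (the sym type and the tab-based filter are illustrative, not perf types):

#include <stdlib.h>
#include <string.h>

struct sym { struct sym *next; char *name; };

static void split_list(struct sym **live)
{
	struct sym *old = *live;	/* snapshot, like old_root */
	*live = NULL;			/* the live container starts empty */

	while (old) {
		struct sym *pos = old;
		old = old->next;	/* advance before unlinking */

		if (strchr(pos->name, '\t') == NULL) {
			free(pos->name);	/* filtered out: delete */
			free(pos);
			continue;
		}
		pos->next = *live;	/* re-insert the survivor */
		*live = pos;
	}
}

int main(void)
{
	struct sym *list = NULL;

	split_list(&list);	/* no-op on an empty list */
	return 0;
}
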
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 40073c60b83d..dcd786e364f2 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -84,6 +84,7 @@ struct symbol_conf {
 	unsigned short	priv_size;
 	unsigned short	nr_events;
 	bool		try_vmlinux_path,
+			force,
 			ignore_vmlinux,
 			ignore_vmlinux_buildid,
 			show_kernel_path,
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d8e4b20b6d54..0dac7e05a6ac 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void)
 	unsigned long long msr;
 	unsigned int ratio;
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 
-	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
+	fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 40) & 0xFF;
 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1807,7 +1807,7 @@ void check_permissions()
  *
  * MSR_SMI_COUNT                   0x00000034
  *
- * MSR_NHM_PLATFORM_INFO           0x000000ce
+ * MSR_PLATFORM_INFO               0x000000ce
  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
  *
  * MSR_PKG_C3_RESIDENCY            0x000003f8
@@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 	base_ratio = (msr >> 8) & 0xFF;
 
 	base_hz = base_ratio * bclk * 1000000;
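
Both turbostat call sites decode the register now referred to by its SDM name, MSR_PLATFORM_INFO (0xce): bits 15:8 carry the base (non-turbo) ratio and bits 47:40 the maximum-efficiency ratio, each scaled by the bus clock. A standalone sketch of that decoding, with a made-up MSR value in place of a real get_msr() read:

#include <stdio.h>

int main(void)
{
	unsigned long long msr = 0x0000180000002400ULL;	/* example value */
	double bclk = 100.0;				/* MHz, assumed */
	unsigned int base_ratio = (msr >> 8) & 0xFF;	/* bits 15:8 */
	unsigned int min_ratio  = (msr >> 40) & 0xFF;	/* bits 47:40 */

	printf("base frequency: %.0f MHz\n", base_ratio * bclk);	/* 3600 */
	printf("max efficiency frequency: %.0f MHz\n", min_ratio * bclk); /* 2400 */
	return 0;
}
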
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 40ab4476c80a..51cf8256c6cd 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -420,8 +420,7 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
 
 static int nfit_test0_alloc(struct nfit_test *t)
 {
-	size_t nfit_size = sizeof(struct acpi_table_nfit)
-		+ sizeof(struct acpi_nfit_system_address) * NUM_SPA
+	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
 		+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
 		+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
 		+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
@@ -471,8 +470,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
 
 static int nfit_test1_alloc(struct nfit_test *t)
 {
-	size_t nfit_size = sizeof(struct acpi_table_nfit)
-		+ sizeof(struct acpi_nfit_system_address)
+	size_t nfit_size = sizeof(struct acpi_nfit_system_address)
 		+ sizeof(struct acpi_nfit_memory_map)
 		+ sizeof(struct acpi_nfit_control_region);
 
@@ -488,39 +486,24 @@ static int nfit_test1_alloc(struct nfit_test *t)
 	return 0;
 }
 
-static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
-{
-	memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
-	nfit->header.length = size;
-	nfit->header.revision = 1;
-	memcpy(nfit->header.oem_id, "LIBND", 6);
-	memcpy(nfit->header.oem_table_id, "TEST", 5);
-	nfit->header.oem_revision = 1;
-	memcpy(nfit->header.asl_compiler_id, "TST", 4);
-	nfit->header.asl_compiler_revision = 1;
-}
-
 static void nfit_test0_setup(struct nfit_test *t)
 {
 	struct nvdimm_bus_descriptor *nd_desc;
 	struct acpi_nfit_desc *acpi_desc;
 	struct acpi_nfit_memory_map *memdev;
 	void *nfit_buf = t->nfit_buf;
-	size_t size = t->nfit_size;
 	struct acpi_nfit_system_address *spa;
 	struct acpi_nfit_control_region *dcr;
 	struct acpi_nfit_data_region *bdw;
 	struct acpi_nfit_flush_address *flush;
 	unsigned int offset;
 
-	nfit_test_init_header(nfit_buf, size);
-
 	/*
 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
 	 * does not actually alias the related block-data-window
 	 * regions)
 	 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit);
+	spa = nfit_buf;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -533,7 +516,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	 * does not actually alias the related block-data-window
 	 * regions)
 	 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
+	spa = nfit_buf + sizeof(*spa);
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -542,7 +525,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = SPA1_SIZE;
 
 	/* spa2 (dcr0) dimm0 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
+	spa = nfit_buf + sizeof(*spa) * 2;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -551,7 +534,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa3 (dcr1) dimm1 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
+	spa = nfit_buf + sizeof(*spa) * 3;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -560,7 +543,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa4 (dcr2) dimm2 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
+	spa = nfit_buf + sizeof(*spa) * 4;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -569,7 +552,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa5 (dcr3) dimm3 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
+	spa = nfit_buf + sizeof(*spa) * 5;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -578,7 +561,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa6 (bdw for dcr0) dimm0 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
+	spa = nfit_buf + sizeof(*spa) * 6;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -587,7 +570,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DIMM_SIZE;
 
 	/* spa7 (bdw for dcr1) dimm1 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
+	spa = nfit_buf + sizeof(*spa) * 7;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -596,7 +579,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DIMM_SIZE;
 
 	/* spa8 (bdw for dcr2) dimm2 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
+	spa = nfit_buf + sizeof(*spa) * 8;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -605,7 +588,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DIMM_SIZE;
 
 	/* spa9 (bdw for dcr3) dimm3 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
+	spa = nfit_buf + sizeof(*spa) * 9;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -613,7 +596,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->address = t->dimm_dma[3];
 	spa->length = DIMM_SIZE;
 
-	offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
+	offset = sizeof(*spa) * 10;
 	/* mem-region0 (spa0, dimm0) */
 	memdev = nfit_buf + offset;
 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1100,15 +1083,13 @@ static void nfit_test0_setup(struct nfit_test *t)
 
 static void nfit_test1_setup(struct nfit_test *t)
 {
-	size_t size = t->nfit_size, offset;
+	size_t offset;
 	void *nfit_buf = t->nfit_buf;
 	struct acpi_nfit_memory_map *memdev;
 	struct acpi_nfit_control_region *dcr;
 	struct acpi_nfit_system_address *spa;
 
-	nfit_test_init_header(nfit_buf, size);
-
-	offset = sizeof(struct acpi_table_nfit);
+	offset = 0;
 	/* spa0 (flat range with no bdw aliasing) */
 	spa = nfit_buf + offset;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
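
With the bogus ACPI table header gone from the test buffer, every sub-table offset drops by sizeof(struct acpi_table_nfit): SPA entries now pack from offset 0 and the memory-map array starts right after them. A toy version of the same packed-offset arithmetic, with invented stand-in sizes rather than the real ACPI structs:

#include <stdio.h>
#include <stddef.h>

#define NUM_SPA 10

struct spa { char bytes[56]; };	/* stand-in for acpi_nfit_system_address */

int main(void)
{
	size_t offset;
	int i;

	/* spa entries are packed from offset 0 ... */
	for (i = 0; i < NUM_SPA; i++)
		printf("spa%d at offset %zu\n", i, sizeof(struct spa) * i);

	/* ... and the memdev array starts right after the last spa */
	offset = sizeof(struct spa) * NUM_SPA;
	printf("mem-region0 at offset %zu\n", offset);
	return 0;
}
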
diff --git a/tools/testing/selftests/futex/README b/tools/testing/selftests/futex/README
index 3224a049b196..0558bb9ce0a6 100644
--- a/tools/testing/selftests/futex/README
+++ b/tools/testing/selftests/futex/README
@@ -27,7 +27,7 @@ o The build system shall remain as simple as possible, avoiding any archive or
 o Where possible, any helper functions or other package-wide code shall be
   implemented in header files, avoiding the need to compile intermediate object
   files.
-o External dependendencies shall remain as minimal as possible. Currently gcc
+o External dependencies shall remain as minimal as possible. Currently gcc
   and glibc are the only dependencies.
 o Tests return 0 for success and < 0 for failure.
 
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e38cc54942db..882fe83a3554 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
 	pid_t parent = getppid();
 	int fd;
 	void *map1, *map2;
+	int page_size = sysconf(_SC_PAGESIZE);
+
+	ASSERT_LT(0, page_size);
 
 	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 	ASSERT_EQ(0, ret);
@@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
 
 	EXPECT_EQ(parent, syscall(__NR_getppid));
 	map1 = (void *)syscall(sysno,
-		NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE);
+		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
 	EXPECT_NE(MAP_FAILED, map1);
 	/* mmap2() should never return. */
 	map2 = (void *)syscall(sysno,
-		NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
+		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
 	EXPECT_EQ(MAP_FAILED, map2);
 
 	/* The test failed, so clean up the resources. */
-	munmap(map1, PAGE_SIZE);
-	munmap(map2, PAGE_SIZE);
+	munmap(map1, page_size);
+	munmap(map2, page_size);
 	close(fd);
 }
 
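
The seccomp fix swaps the compile-time PAGE_SIZE macro, which userspace headers do not provide on every architecture, for the run-time value from sysconf(3). A minimal self-contained version of the same probe:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);

	if (page_size <= 0) {		/* mirrors the test's ASSERT_LT(0, ...) */
		perror("sysconf");
		return 1;
	}
	printf("page size: %ld bytes\n", page_size);
	return 0;
}
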
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index bcf5ec760eb9..5a6016224bb9 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -128,6 +128,7 @@ static const char * const page_flag_names[] = {
 	[KPF_THP]		= "t:thp",
 	[KPF_BALLOON]		= "o:balloon",
 	[KPF_ZERO_PAGE]		= "z:zero_page",
+	[KPF_IDLE]		= "i:idle_page",
 
 	[KPF_RESERVED]		= "r:reserved",
 	[KPF_MLOCKED]		= "m:mlocked",
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 21a0ab2d8919..69bca185c471 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	kvm_timer_update_state(vcpu);
 
 	/*
 	 * If we enter the guest with the virtual input level to the VGIC
 	 * asserted, then we have already told the VGIC what we need to, and
 	 * we don't need to exit from the guest until the guest deactivates
 	 * the already injected interrupt, so therefore we should set the
 	 * hardware active state to prevent unnecessary exits from the guest.
 	 *
-	 * Conversely, if the virtual input level is deasserted, then always
-	 * clear the hardware active state to ensure that hardware interrupts
-	 * from the timer triggers a guest exit.
-	 */
-	if (timer->irq.level)
+	 * Also, if we enter the guest with the virtual timer interrupt active,
+	 * then it must be active on the physical distributor, because we set
+	 * the HW bit and the guest must be able to deactivate the virtual and
+	 * physical interrupt at the same time.
+	 *
+	 * Conversely, if the virtual input level is deasserted and the virtual
+	 * interrupt is not active, then always clear the hardware active state
+	 * to ensure that hardware interrupts from the timer triggers a guest
+	 * exit.
+	 */
+	if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
 		phys_active = true;
 	else
 		phys_active = false;
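
The rewritten comment above carries the invariant: because the HW bit ties virtual and physical deactivation together, the physical active state must follow the virtual interrupt's active state as well as the input level. Reduced to its decision shape, with stub predicates standing in for timer->irq.level and kvm_vgic_map_is_active() (this is the logic only, not KVM code):

#include <stdbool.h>
#include <stdio.h>

static bool virt_level_asserted(void) { return false; }	/* stub */
static bool virt_irq_active(void) { return true; }	/* stub */

static bool compute_phys_active(void)
{
	/*
	 * Keep the physical interrupt active while either the virtual
	 * line is asserted or the guest has not yet deactivated the
	 * injected interrupt; otherwise clear it so the next hardware
	 * timer interrupt forces a guest exit.
	 */
	return virt_level_asserted() || virt_irq_active();
}

int main(void)
{
	printf("phys_active = %d\n", compute_phys_active());
	return 0;
}
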
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 533538385d5d..65461f821a75 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 	vgic_set_lr(vcpu, lr_nr, vlr);
 }
 
+static bool dist_active_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+	int i;
+
+	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
+
+		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+			return true;
+	}
+
+	return dist_active_irq(vcpu);
+}
+
 /*
  * An interrupt may have been disabled after being made pending on the
  * CPU interface (the classic case is a timer running while we're
@@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * may have been serviced from another vcpu. In all cases,
 	 * move along.
 	 */
-	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
+	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
 		goto epilog;
 
 	/* SGIs */
@@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	struct irq_phys_map *map;
-	bool phys_active;
 	bool level_pending;
-	int ret;
 
 	if (!(vlr.state & LR_HW))
 		return false;
 
-	map = vgic_irq_map_search(vcpu, vlr.irq);
-	BUG_ON(!map);
-
-	ret = irq_get_irqchip_state(map->irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    &phys_active);
-
-	WARN_ON(ret);
-
-	if (phys_active)
-		return 0;
+	if (vlr.state & LR_STATE_ACTIVE)
+		return false;
 
 	spin_lock(&dist->lock);
 	level_pending = process_queued_irq(vcpu, lr, vlr);
@@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return 0;
-
-	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
-
 void vgic_kick_vcpus(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;